import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456,
        771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
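
# Illustrative use of the tokenizer exercised above (a minimal sketch: the checkpoint
# name and src/tgt languages come from the integration class; running it downloads the
# checkpoint, and the ending-token claim restates the suffix assertion in the tests):
#   tok = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
#   tok("def f(a,b): return a+b").input_ids   # source sequences end with [eos=2, PYTHON_CODE]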
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using binary insertion sort.

    The insertion point for each element is found with binary search, so comparisons
    are O(log n) per element, while the shifting of elements remains O(n).
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the insertion position of `val` in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
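
# Quick doctest-style example (sample values are arbitrary):
#   >>> binary_insertion_sort([5, 2, 9, 1, 5, 6])
#   [1, 2, 5, 5, 6, 9]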
"""
Project Euler problem 13: https://projecteuler.net/problem=13

Work out the first ten digits of the sum of the numbers stored in num.txt.
"""
import os


def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
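
# For intuition about the final slice: the sum is computed exactly as a Python int,
# then truncated to its ten leading digits via str(), e.g.
#   str(12345678901234)[:10] == "1234567890"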
"""Segment tree with lazy propagation, supporting range-assignment updates and range-max queries."""
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """
        update(1, 1, size, a, b, v) assigns value v to the range [a, b] in O(log n)
        (a segment tree without lazy propagation would take O(n log n) per update)
        """
        # push a pending lazy value down before touching this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """query(1, 1, size, a, b) returns the max of the range [a, b] in O(log n)"""
        # push a pending lazy value down before reading this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
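
# Minimal check of the lazy-update path (1-indexed positions; values are arbitrary):
#   st = SegmentTree(4)
#   st.build(1, 1, 4, [3, 1, 4, 1])
#   st.update(1, 1, 4, 2, 3, 9)   # assign 9 to positions 2..3
#   st.query(1, 1, 4, 1, 4)       # -> 9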
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
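
# Minimal usage sketch (the paths are hypothetical; the script infers the dataset name --
# here "aeslc" -- from the checkpoint's parent directory, matching the default above):
#   convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "pegasus/aeslc")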
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
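
# Typical invocations (an illustrative sketch; the filename nlp_example.py is hypothetical,
# substitute whatever this file is saved as):
#   python nlp_example.py --cpu                          # single process on CPU
#   accelerate launch nlp_example.py                     # distributed, per `accelerate config`
#   accelerate launch nlp_example.py --mixed_precision fp16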
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure with union by rank and path compression
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing `data`
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set `data` belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for the union operation: attach the lower-rank root to the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight (undirected, so both directions)
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a Minimum Spanning Tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
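
# Quick usage sketch (the sample graph is arbitrary):
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 3)
#   mst = g.kruskal()   # keeps the two cheapest edges, (1, 2) and (2, 3)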
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    # iterate z -> z^2 + c for c = x + iy and return the normalized escape step
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
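
# Quick sanity checks (follow directly from the definitions above):
#   get_distance(0, 0, 50) == 1.0          # the origin never diverges
#   get_color_coded_rgb(1) == (0, 0, 0)    # points inside the set are drawn black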
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
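
# Minimal usage sketch of the pipeline exercised above (model id and arguments are taken
# from the integration tests; running it needs a GPU and downloads the weights):
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   images = pipe(["."], guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np").images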
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
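
# Illustrative end-to-end use of the model exercised above (a minimal sketch: checkpoint
# name and preprocessing come from the integration test; running this downloads weights):
#   model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
#   processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits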
| 710
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
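# Editor's gloss, based on Accelerate's documented behavior: when gradient
# accumulation is enabled, accelerator.backward() divides the loss by
# gradient_accumulation_steps internally, which is why the manual path above
# performs that division explicitly before calling loss.backward().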
def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
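# Minimal usage sketch (illustrative only -- assumes the script runs under an
# Accelerate launcher so that Accelerator() picks up the right device):
# accelerator = Accelerator()
# model, ddp_model, dataloader = get_training_setup(accelerator)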
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on a distributed setup that the context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync at the end of each accumulation cycle
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
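# GradientState is shared (singleton-like) state behind every Accelerator, so
# it is reset explicitly here to keep accumulation bookkeeping from leaking
# into the next test that constructs its own Accelerator.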
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
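# The nested loop above exercises dataloader re-entrancy: GradientState should
# always point at the innermost active dataloader and restore the outer one
# (including its end_of_dataloader flag) once the inner loop completes.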
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 97
| 0
|
'''simple docstring'''
# Algorithm for pigeonhole sort
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is the difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Put the elements back into the array in sorted order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
if __name__ == "__main__":
main()
| 263
|
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num
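# Caveat: the float-based check above can misclassify very large integers
# because math.sqrt() rounds to double precision; the binary-search version
# below stays in exact integer arithmetic and avoids that pitfall.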
def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
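# e.g. perfect_square_binary_search(16) -> True, perfect_square_binary_search(15) -> False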
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263
| 1
|
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field holding the default key
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
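# XOR is its own inverse (x ^ k ^ k == x), so encryption and decryption are the
# same transformation with the same key; the commented smoke tests below
# exercise that round trip.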
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 545
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = [file for file in os.listdir(SCREAMING_SNAKE_CASE__ ) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )]
if identifier is not None:
SCREAMING_SNAKE_CASE__ : int = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for n_ in n_identifier:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [file for file in files if n_ not in file]
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [file for file in files if n_identifier not in file]
SCREAMING_SNAKE_CASE__ : int = ignore_files or []
ignore_files.append("""__init__.py""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , SCREAMING_SNAKE_CASE__ )
if only_modules:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = file.split(""".""" )[0]
try:
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = doctest.DocTestSuite(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = Path("""src/transformers""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = """modeling"""
SCREAMING_SNAKE_CASE__ : int = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(SCREAMING_SNAKE_CASE__ , identifier=SCREAMING_SNAKE_CASE__ , ignore_files=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = Path("""src/transformers""" )
SCREAMING_SNAKE_CASE__ : List[str] = """tokenization"""
self.analyze_directory(SCREAMING_SNAKE_CASE__ , identifier=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = Path("""src/transformers""" )
SCREAMING_SNAKE_CASE__ : List[str] = """configuration"""
self.analyze_directory(SCREAMING_SNAKE_CASE__ , identifier=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path("""src/transformers""" )
SCREAMING_SNAKE_CASE__ : Dict = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(SCREAMING_SNAKE_CASE__ , n_identifier=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = Path("""docs/source""" )
SCREAMING_SNAKE_CASE__ : Any = ["""favicon.ico"""]
self.analyze_directory(SCREAMING_SNAKE_CASE__ , ignore_files=SCREAMING_SNAKE_CASE__ , only_modules=SCREAMING_SNAKE_CASE__ )
| 545
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_A : Union[str, Any] = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = feature_size
SCREAMING_SNAKE_CASE__ = sampling_rate
SCREAMING_SNAKE_CASE__ = padding_value
SCREAMING_SNAKE_CASE__ = kwargs.pop('''padding_side''' , '''right''' )
SCREAMING_SNAKE_CASE__ = kwargs.pop('''return_attention_mask''' , A_ )
super().__init__(**A_ )
def lowercase_ ( self , A_ , A_ = True , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , ):
'''simple docstring'''
if isinstance(A_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
SCREAMING_SNAKE_CASE__ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE__ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A_ ) == 0:
if return_attention_mask:
SCREAMING_SNAKE_CASE__ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
SCREAMING_SNAKE_CASE__ = required_input[0]
if isinstance(A_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
SCREAMING_SNAKE_CASE__ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A_ ):
SCREAMING_SNAKE_CASE__ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A_ ):
SCREAMING_SNAKE_CASE__ = '''tf'''
elif is_torch_tensor(A_ ):
SCREAMING_SNAKE_CASE__ = '''pt'''
elif isinstance(A_ , (int, float, list, tuple, np.ndarray) ):
SCREAMING_SNAKE_CASE__ = '''np'''
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A_ )}. '''
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
SCREAMING_SNAKE_CASE__ = to_numpy(A_ )
else:
SCREAMING_SNAKE_CASE__ = [to_numpy(A_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
SCREAMING_SNAKE_CASE__ = self._get_padding_strategies(padding=A_ , max_length=A_ )
SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE__ = len(A_ )
if not all(len(A_ ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
SCREAMING_SNAKE_CASE__ = []
for i in range(A_ ):
SCREAMING_SNAKE_CASE__ = {k: v[i] for k, v in processed_features.items()}
# truncation
SCREAMING_SNAKE_CASE__ = self._truncate(
A_ , max_length=A_ , pad_to_multiple_of=A_ , truncation=A_ , )
truncated_inputs.append(A_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
SCREAMING_SNAKE_CASE__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
SCREAMING_SNAKE_CASE__ = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE__ = {}
for i in range(A_ ):
# padding
SCREAMING_SNAKE_CASE__ = self._pad(
truncated_inputs[i] , max_length=A_ , padding_strategy=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , )
for key, value in outputs.items():
if key not in batch_outputs:
SCREAMING_SNAKE_CASE__ = []
if value.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ = value.astype(np.floataa )
batch_outputs[key].append(A_ )
return BatchFeature(A_ , tensor_type=A_ )
def lowercase_ ( self , A_ , A_ = None , A_ = PaddingStrategy.DO_NOT_PAD , A_ = None , A_ = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
SCREAMING_SNAKE_CASE__ = len(A_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
SCREAMING_SNAKE_CASE__ = np.ones(len(A_ ) , dtype=np.intaa )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE__ = max_length - len(A_ )
if self.padding_side == "right":
if return_attention_mask:
SCREAMING_SNAKE_CASE__ = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
SCREAMING_SNAKE_CASE__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
SCREAMING_SNAKE_CASE__ = np.pad(
A_ , A_ , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
SCREAMING_SNAKE_CASE__ = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
SCREAMING_SNAKE_CASE__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
SCREAMING_SNAKE_CASE__ = np.pad(
A_ , A_ , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def lowercase_ ( self , A_ , A_ = None , A_ = None , A_ = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE__ = len(A_ ) > max_length
if needs_to_be_truncated:
SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
SCREAMING_SNAKE_CASE__ = processed_features['''attention_mask'''][:max_length]
return processed_features
def lowercase_ ( self , A_=False , A_=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
SCREAMING_SNAKE_CASE__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A_ , A_ ):
SCREAMING_SNAKE_CASE__ = PaddingStrategy(A_ )
elif isinstance(A_ , A_ ):
SCREAMING_SNAKE_CASE__ = padding
else:
SCREAMING_SNAKE_CASE__ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
| 100
|
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
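# With 5 random bases the chance a composite slips through is at most 4**-5
# (~0.1%); for guaranteed answers on bounded inputs, a fixed deterministic
# witness set can be used instead of random bases.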
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 329
| 0
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
mock = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
assert _test_patching.open is open
mock = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , mock ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
# pandas.read_csv is not present in _test_patching
mock = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , mock ):
pass
def test_patch_submodule_missing_builtin():
# builtins should always be mocked even if they're not in the globals,
# in case they're loaded at one point
mock = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , None ) is None
with patch_submodule(_test_patching , 'len' , mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
mock = '__test_patch_submodule_start_and_stop_mock__'
patch = patch_submodule(_test_patching , 'open' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
mock_join = '__test_patch_submodule_successive_join__'
mock_dirname = '__test_patch_submodule_successive_dirname__'
mock_rename = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
mock = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , mock ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , mock ):
pass
| 22
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean
| 22
| 1
|
'''simple docstring'''
def odd_even_transposition(arr):
    arr_size = len(arr)
    for pass_num in range(arr_size):
        # alternate between even- and odd-indexed pairs
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i + 1], arr[i] = arr[i], arr[i + 1]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
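# Odd-even transposition sort is the parallel-friendly cousin of bubble sort:
# each pass compares disjoint adjacent pairs (even-indexed, then odd-indexed),
# so all comparisons within a pass could run concurrently; n passes suffice
# for a list of length n.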
| 679
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
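# Layout conventions behind the reshapes above: PyTorch Conv2d weights are
# (out_ch, in_ch, kH, kW) while Flax convolutions expect (kH, kW, in_ch, out_ch),
# hence transpose(2, 3, 1, 0); PyTorch Linear stores (out_features, in_features)
# while a Flax Dense kernel is (in_features, out_features), hence the plain .T.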
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
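# Hypothetical usage sketch (the model objects are illustrative, not defined here):
# flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
# Note that keys absent from the randomly initialized Flax params are still
# copied over, so downstream loading code can warn about unexpected weights.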
| 679
| 1
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = (CMStochasticIterativeScheduler,)
__A = 10
def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> int:
"""simple docstring"""
_UpperCamelCase = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
config.update(**lowercase_)
return config
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = 10
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = self.scheduler_classes[0](**lowercase_)
scheduler.set_timesteps(lowercase_)
_UpperCamelCase = scheduler.timesteps[0]
_UpperCamelCase = scheduler.timesteps[1]
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_).prev_sample
_UpperCamelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_)
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = 1
scheduler.set_timesteps(lowercase_)
_UpperCamelCase = scheduler.timesteps
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowercase_):
# 1. scale model input
_UpperCamelCase = scheduler.scale_model_input(lowercase_ , lowercase_)
# 2. predict noise residual
_UpperCamelCase = model(lowercase_ , lowercase_)
# 3. predict previous sample x_t-1
_UpperCamelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_).prev_sample
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(lowercase_))
_UpperCamelCase = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 192.7614) < 1e-2
assert abs(result_mean.item() - 0.25_10) < 1e-3
def __UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = [106, 0]
scheduler.set_timesteps(timesteps=lowercase_)
_UpperCamelCase = scheduler.timesteps
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_UpperCamelCase = scheduler.scale_model_input(lowercase_ , lowercase_)
# 2. predict noise residual
_UpperCamelCase = model(lowercase_ , lowercase_)
# 3. predict previous sample x_t-1
_UpperCamelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_).prev_sample
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(lowercase_))
_UpperCamelCase = torch.mean(torch.abs(lowercase_))
assert abs(result_sum.item() - 347.6357) < 1e-2
assert abs(result_mean.item() - 0.45_27) < 1e-3
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = [39, 30, 12, 15, 0]
with self.assertRaises(lowercase_ , msg="`timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=lowercase_)
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**lowercase_)
_UpperCamelCase = [39, 30, 12, 1, 0]
_UpperCamelCase = len(lowercase_)
with self.assertRaises(lowercase_ , msg="Can only pass one of `num_inference_steps` or `timesteps`."):
scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 721
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    """Read the first `length` examples one by one."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Read the dataset in slices of `batch_size` examples."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, type, length):
    """Read examples one by one with the given output format (numpy, pandas, torch, ...)."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, type, length, batch_size):
    """Read batches with the given output format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # store the measured duration keyed by function name and kwargs
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
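
# `get_duration` comes from a local `utils` module that is not shown here. A minimal
# stand-in could look like the sketch below (an assumption about its behavior, not the
# actual benchmark utility): run the wrapped function once and return the elapsed
# wall-clock time in seconds, which is what `benchmark_iterating` records.
import functools
import timeit


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start
    return wrapper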
| 82
| 0
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return all primes up to and including `num` using the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p starting at p*p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
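

# Illustrative cross-check (ours, not part of the original module): the sieve agrees
# with a naive trial-division primality test for small limits.
def _cross_check_sieve(limit: int = 100) -> None:
    def is_prime(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

    assert prime_sieve_eratosthenes(limit) == [n for n in range(2, limit + 1) if is_prime(n)]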
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 336
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
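

# Illustrative example (hypothetical values): invoked as
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --debug
#
# the launcher prints a gcloud invocation shaped like the sketch below rather than
# running it. The helper is ours, distilled from the command assembly above:
def build_gcloud_ssh_cmd(tpu_name, tpu_zone, command, use_alpha=False):
    cmd = ["gcloud"] + (["alpha"] if use_alpha else [])
    cmd += ["compute", "tpus", "tpu-vm", "ssh", tpu_name, "--zone", tpu_zone, "--command", command, "--worker", "all"]
    return cmd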
| 308
| 0
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
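

# Illustrative only: a concrete test case plugs a real feature extractor class and its
# init kwargs into the mixin above. `MyFeatureExtractor` and its kwargs are hypothetical.
#
# import unittest
#
# class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#     feature_extraction_class = MyFeatureExtractor
#     feat_extract_dict = {"feature_size": 1, "sampling_rate": 16_000, "padding_value": 0.0}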
| 708
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
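

# Illustrative sketch (ours) mirroring the fast tests above: one-step vs. multistep
# sampling with the tiny test checkpoint already referenced in this file. The function
# name is an assumption; shapes follow the assertions above.
def consistency_sampling_demo():
    unet = UNet2DModel.from_pretrained("diffusers/consistency-models-test", subfolder="test_unet")
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cpu")

    generator = torch.manual_seed(0)
    # multistep: explicit decreasing timesteps; onestep: a single inference step
    multistep = pipe(
        batch_size=1, num_inference_steps=None, timesteps=[22, 0], generator=generator, output_type="np"
    ).images
    onestep = pipe(batch_size=1, num_inference_steps=1, generator=generator, output_type="np").images
    return multistep.shape, onestep.shape  # both (1, 32, 32, 3)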
| 120
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF checkpoint names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
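

# Quick illustrative check (ours, not part of the modeling file): for a 5x5 input and
# a 3x3 convolution with stride 2, TF "SAME" padding makes the output ceil(5/2) = 3.
def _sketch_check_tf_padding():
    conv = nn.Conv2d(1, 1, kernel_size=3, stride=2, padding=0)
    features = torch.zeros(1, 1, 5, 5)
    padded = apply_tf_padding(features, conv)
    assert padded.shape[-2:] == (7, 7)      # 2 rows/cols of padding added
    assert conv(padded).shape[-2:] == (3, 3)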
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
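

# Illustrative usage sketch (ours; standard transformers inference pattern, with the
# checkpoint and expected label taken from the docstring constants above). `image` is
# any PIL image.
def classify_image_demo(image):
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]  # e.g. "tabby, tabby cat"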
| 16
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Tuple = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
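
# Conceptual sketch (ours): `gather_for_metrics` collects tensors from every process
# and drops the duplicated samples that pad the last uneven batch, so a metric sees
# each dataset example exactly once. The minimal evaluation pattern looks like:
def evaluation_loop_sketch(model, dataloader, accelerator, metric):
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            logits = model(**batch).logits
        preds, refs = accelerator.gather_for_metrics((logits.argmax(dim=-1), batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()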
| 668
| 0
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
__snake_case = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
__snake_case = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset, computed after canonicalizing inputs."""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the fraction of predictions that are equivalent to their reference."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 715
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
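
# Illustrative (ours, not part of the __init__): with the lazy module installed in
# sys.modules, importing the package is cheap; heavy torch/tf submodules only load on
# first attribute access.
#
# import importlib
# vit_mae = importlib.import_module("transformers.models.vit_mae")
# config = vit_mae.ViTMAEConfig()   # resolves configuration_vit_mae only
# model_cls = vit_mae.ViTMAEModel   # first access triggers the torch modeling import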
| 285
| 0
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
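

# Illustrative helper (ours, not in the original module): sanity-check that a returned
# path is a contiguous sequence of free cells under the movement model in `delta`.
def validate_path(path: list[TPosition]) -> None:
    for (y1, x1), (y2, x2) in zip(path, path[1:]):
        assert [y2 - y1, x2 - x1] in delta, "non-adjacent step"
        assert grid[y2][x2] == 0, "path crosses an obstacle"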
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_astar = BidirectionalAStar(init, goal)
    bd_path = bd_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 40
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=False, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : float =field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCamelCase = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the train/eval transforms to a batch of images."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
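# A quick worked check of the absolute learning-rate rule used above (a standalone sketch,
# not part of the training script): with base_lr = 1e-3, a per-device batch of 32,
# 2 gradient accumulation steps and 4 processes, the total batch is 32 * 2 * 4 = 256,
# so the absolute learning rate is exactly base_lr.
base_lr = 1e-3
total_train_batch_size = 32 * 2 * 4  # train_batch_size * gradient_accumulation_steps * world_size
absolute_lr = base_lr * total_train_batch_size / 256
assert absolute_lr == base_lr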
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n    year={{2020}},\n    eprint={{2006.10369}},\n    archivePrefix={{arXiv}},\n    primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
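# A minimal usage sketch for the generator above (hypothetical destination; the real script
# writes into <repo>/model_cards/allenai/<model_name>):
import tempfile

demo_dir = Path(tempfile.mkdtemp()) / "allenai" / "wmt16-en-de-12-1"
write_model_card(demo_dir, src_lang="en", tgt_lang="de", model_name="wmt16-en-de-12-1")
print((demo_dir / "README.md").read_text(encoding="utf-8").splitlines()[1])  # prints the "---" front-matter opener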
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC using the bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
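# A small worked example of the gradient-accumulation fallback above (a standalone sketch,
# not part of the training script): a requested batch size of 64 exceeds MAX_GPU_BATCH_SIZE = 16,
# so the script runs micro-batches of 16 and accumulates 4 steps, preserving the effective batch.
requested_batch_size = 64
MAX_GPU_BATCH_SIZE = 16
gradient_accumulation_steps = requested_batch_size // MAX_GPU_BATCH_SIZE  # 4
assert MAX_GPU_BATCH_SIZE * gradient_accumulation_steps == requested_batch_size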
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
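# A minimal usage sketch (illustrative only; runs in the context of this module):
# `attribute_map` lets the generic config names resolve to the GPTSAN-specific ones,
# so `hidden_size` reads `d_model` and `num_hidden_layers` reads the combined layer count.
config = GPTSanJapaneseConfig()
assert config.hidden_size == config.d_model == 1024
assert config.num_hidden_layers == config.num_switch_layers + config.num_ext_layers == 10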
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
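# A minimal usage sketch (hypothetical file names; guarded so it is a no-op unless a real
# TensorFlow checkpoint and config actually exist in the working directory):
import os

if os.path.exists("bert_model.ckpt.index") and os.path.exists("bert_config.json"):
    convert_tf_checkpoint_to_pytorch("bert_model.ckpt", "bert_config.json", "pytorch_model.bin")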
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNet2DModel


api = HfApi()

results = {}
# fmt: off
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_SCREAMING_SNAKE_CASE : str = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_SCREAMING_SNAKE_CASE : int = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_SCREAMING_SNAKE_CASE : int = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_SCREAMING_SNAKE_CASE : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_SCREAMING_SNAKE_CASE : Dict = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_SCREAMING_SNAKE_CASE : int = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_SCREAMING_SNAKE_CASE : Dict = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_SCREAMING_SNAKE_CASE : int = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
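# A standalone sketch of the tolerance check used above: `torch.allclose` with atol=1e-3
# accepts elementwise drift up to the tolerance and rejects anything larger.
import torch

a = torch.tensor([1.0000, 2.0000])
b = torch.tensor([1.0005, 1.9995])
assert torch.allclose(a, b, atol=1e-3)
assert not torch.allclose(a, b + 0.01, atol=1e-3)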
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
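# A standalone sketch of the seeding pattern used in `get_dummy_inputs`: two generators
# seeded identically drive `torch.randn` to identical draws, which is what makes the
# expected-slice assertions above reproducible on CPU.
import torch

g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))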
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=False, help="The input data dir. Should contain data files for WikiText."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=False, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=False, help="The output directory where the final fine-tuned model is stored."
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name"
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set"
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner"
    )
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2)")
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration"
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
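# A standalone sketch of the IG(X) filter inside `finetune` (toy values, not the real
# secondary learner): a context is backpropagated only if its predicted information gain
# clears the threshold, and once the run reaches global step 10 the loop lowers the
# threshold to -1 for the rest of training, after which essentially every context passes.
def passes_filter(predicted_q: float, threshold: float) -> bool:
    return predicted_q >= threshold

threshold = 1.0
assert passes_filter(1.2, threshold)      # informative context, kept
assert not passes_filter(0.5, threshold)  # uninformative context, skipped
threshold = -1                            # the decay applied at global_step == 10
assert passes_filter(0.5, threshold)      # now kept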
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of an array of numbers
    with the lowest possible sum and return the sum along this path.
    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
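# A quick usage example (note the function mutates `grid` in place while accumulating sums):
example_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
print(min_path_sum(example_grid))  # 7, via the path 1 -> 3 -> 1 -> 1 -> 1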
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
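# A toy sketch of the deferred-import pattern that `_LazyModule` implements (illustrative
# only; transformers' real class also handles submodules, __dir__, pickling, etc.):
# attributes are resolved to their providing module on first access.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module_name: [attr, ...]} into {attr: module_name}
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, name):
        if name in self._attr_to_module:
            return getattr(importlib.import_module(self._attr_to_module[name]), name)
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")


lazy = TinyLazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
assert lazy.sqrt(9.0) == 3.0  # `math` is only imported at this point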
| 555
|
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedfileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedfileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedfileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedfileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedfileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        #   File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        #   AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
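# Usage sketch (hypothetical local file, not from the original module): each
# filesystem exposes exactly one member, named after the compressed file with
# its extension stripped.
#
#   fs = GzipFileSystem(fo="data.txt.gz")
#   with fs.open("data.txt") as f:
#       raw = f.read()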
| 555
| 1
|
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.01, max_length=1000):
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
"""simple docstring"""
_lowercase: List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
_lowercase: List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ )
_lowercase: str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowercase: Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
_lowercase: Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A_ , A_ )
_lowercase: Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
_lowercase: str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowercase: Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
_lowercase: List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A_ , A_ )
_lowercase: List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
_lowercase: Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowercase: Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
_lowercase: int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A_ , A_ )
_lowercase: Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
_lowercase: Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ )
# Check the shards when the dataset is very small.
_lowercase: Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
_lowercase: int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A_ , A_ )
_lowercase: Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
_lowercase: Optional[int] = [[], []]
self.check_batch_sampler_shards(A_ , A_ )
    def test_batch_sampler_shards_with_splits(self):
"""simple docstring"""
_lowercase: Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
_lowercase: List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
_lowercase: str = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size.
_lowercase: Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
_lowercase: int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
_lowercase: List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
_lowercase: str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowercase: Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
_lowercase: Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
_lowercase: Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
_lowercase: str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
# Check the shards when the dataset is very small.
_lowercase: List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
_lowercase: Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
_lowercase: List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
_lowercase: str = [[], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
"""simple docstring"""
_lowercase: str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
_lowercase: str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowercase: List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowercase: str = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
_lowercase: str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowercase: Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A_ )
_lowercase: Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowercase: Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
_lowercase: Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowercase: Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=A_ )
_lowercase: Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowercase: Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
_lowercase: Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowercase: Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=A_ )
_lowercase: List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
# Check the shards when the dataset is very small.
_lowercase: int = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
_lowercase: List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
_lowercase: Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A_ )
_lowercase: str = [[], []]
self.check_batch_sampler_shards(A_ , A_ , even_batches=A_ )
    def test_batch_sampler_shards_with_splits_no_even(self):
"""simple docstring"""
_lowercase: List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
_lowercase: int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
_lowercase: int = BatchSampler(range(24 ) , batch_size=4 , drop_last=A_ )
# Expected shouldn't change
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size.
_lowercase: Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
_lowercase: Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
_lowercase: Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=A_ )
_lowercase: Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowercase: List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
_lowercase: List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
_lowercase: Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A_ )
_lowercase: Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
# Check the shards when the dataset is very small.
_lowercase: List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
_lowercase: Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
_lowercase: Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=A_ )
_lowercase: Optional[Any] = [[], []]
self.check_batch_sampler_shards(A_ , A_ , split_batches=A_ , even_batches=A_ )
    def test_batch_sampler_with_varying_batch_size(self):
        """simple docstring"""
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        """simple docstring"""
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(observed) < len(reference):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        """simple docstring"""
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        """simple docstring"""
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        """simple docstring"""
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        """simple docstring"""
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        """simple docstring"""
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        """simple docstring"""
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
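# Sharding intuition for the expected lists above (sketch): with 2 shards, whole
# batches are dealt round-robin, so shard 0 sees batches 0, 2, 4, ... and shard 1
# sees batches 1, 3, 5, ...  With even_batches=True a short tail is topped up by
# wrapping around to indices from the start of the dataset (hence entries such as
# [21, 0, 1]); with even_batches=False the shorter shard simply ends early.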
| 353
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """simple docstring"""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
    tgt = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    """simple docstring"""
    pred = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
    tgt = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    """simple docstring"""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
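# Why newline_sep matters for rougeLsum (sketch): the rouge scorer treats "\n" as
# a sentence boundary when computing the summary-level LCS, so without newline_sep
# a multi-sentence candidate is scored as one long sentence — which is exactly why
# test_newline_cnn_improvement and test_pegasus_newline expect the separated
# variant to score higher.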
| 353
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 717
|
"""simple docstring"""
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def remove_node(self, item: int) -> None:
        if (node := self.get_node(item)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None
def snake_case ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
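# Usage sketch (illustrative, not from the original file): build 1 <-> 2 <-> 3,
# then unlink the middle node.
#
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)
#   print(linked_list)            # 1 2 3
#   linked_list.remove_node(2)
#   print(linked_list)            # 1 3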
| 463
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
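# Usage sketch (illustrative values, not from the original file): compose a full
# InstructBlip config from sub-configs; `text_config` falls back to an OPT config
# when no model_type is given.
#
#   config = InstructBlipConfig(
#       vision_config=InstructBlipVisionConfig().to_dict(),
#       qformer_config=InstructBlipQFormerConfig().to_dict(),
#       text_config={"model_type": "opt"},
#       num_query_tokens=32,
#   )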
| 100
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 603
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7_600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''

    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def _lowerCAmelCase ( self : Any ) ->Dict:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase_ : Union[str, Any] = [np.asarray(__a ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase_ : Any = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCamelCase_ : Optional[int] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test batched
lowerCamelCase_ : Optional[Any] = feat_extract(__a , return_tensors="""np""" ).input_values
lowerCamelCase_ : Tuple = feat_extract(__a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
def _lowerCAmelCase ( self : Any ) ->Union[str, Any]:
lowerCamelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase_ : str = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase_ : str = [None, 1_600, None]
for max_length, padding in zip(__a , __a ):
lowerCamelCase_ : Optional[Any] = feat_extract(__a , padding=__a , max_length=__a , return_tensors="""np""" )
lowerCamelCase_ : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _lowerCAmelCase ( self : Any ) ->Optional[int]:
lowerCamelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ : List[Any] = range(800 , 1_400 , 200 )
lowerCamelCase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase_ : Any = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase_ : Tuple = [None, 1_600, None]
for max_length, padding in zip(__a , __a ):
lowerCamelCase_ : Optional[Any] = feat_extract(__a , max_length=__a , padding=__a )
lowerCamelCase_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _lowerCAmelCase ( self : Any ) ->Dict:
lowerCamelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase_ : Tuple = feat_extract(
__a , truncation=__a , max_length=1_000 , padding="""max_length""" , return_tensors="""np""" )
lowerCamelCase_ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCAmelCase ( self : Dict ) ->Optional[Any]:
lowerCamelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase_ : List[Any] = feat_extract(
__a , truncation=__a , max_length=1_000 , padding="""longest""" , return_tensors="""np""" )
lowerCamelCase_ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
lowerCamelCase_ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase_ : int = feat_extract(
__a , truncation=__a , max_length=2_000 , padding="""longest""" , return_tensors="""np""" )
lowerCamelCase_ : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def _lowerCAmelCase ( self : Dict ) ->str:
lowerCamelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ : str = np.random.rand(100 ).astype(np.floataa )
lowerCamelCase_ : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase_ : Union[str, Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase_ : Dict = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowerCAmelCase ( self : Any ) ->Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase_ : Optional[Any] = [np.asarray(__a ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase_ : List[str] = feature_extractor(audio_target=__a , padding=__a , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCamelCase_ : int = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCamelCase_ : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test batched
lowerCamelCase_ : Any = feature_extractor(__a , return_tensors="""np""" ).input_values
lowerCamelCase_ : Any = feature_extractor(__a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ : Tuple = np.asarray(__a )
lowerCamelCase_ : List[Any] = feature_extractor(__a , return_tensors="""np""" ).input_values
lowerCamelCase_ : str = feature_extractor(__a , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
def _lowerCAmelCase ( self : int ) ->Tuple:
lowerCamelCase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ : Optional[int] = feat_extract.model_input_names[0]
lowerCamelCase_ : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__a ) == len(__a ) for x, y in zip(__a , processed_features[input_name] ) ) )
lowerCamelCase_ : Any = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__a )
lowerCamelCase_ : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
lowerCamelCase_ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCAmelCase ( self : int ) ->Dict:
lowerCamelCase_ : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__a )
lowerCamelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ : Optional[Any] = feat_extract.model_input_names[0]
lowerCamelCase_ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
lowerCamelCase_ : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
lowerCamelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ : Any = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ : List[str] = feat_extract.model_input_names[0]
lowerCamelCase_ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ : List[Any] = feat_extract.num_mel_bins # hack!
lowerCamelCase_ : str = feat_extract.pad(__a , padding="""longest""" , return_tensors="""np""" )[input_name]
lowerCamelCase_ : Dict = feat_extract.pad(__a , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : Optional[int] ) ->Tuple:
lowerCamelCase_ : int = self.feat_extract_dict
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : Tuple = self.feature_extraction_class(**__a )
lowerCamelCase_ : int = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ : str = [len(__a ) for x in speech_inputs]
lowerCamelCase_ : Any = feat_extract.model_input_names[0]
lowerCamelCase_ : Dict = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ : Tuple = feat_extract.num_mel_bins # hack!
lowerCamelCase_ : Any = feat_extract.pad(__a , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __a )
def _lowerCAmelCase ( self : Optional[int] ) ->int:
lowerCamelCase_ : Optional[Any] = self.feat_extract_dict
lowerCamelCase_ : int = True
lowerCamelCase_ : List[Any] = self.feature_extraction_class(**__a )
lowerCamelCase_ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ : str = [len(__a ) for x in speech_inputs]
lowerCamelCase_ : List[Any] = feat_extract.model_input_names[0]
lowerCamelCase_ : List[Any] = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ : str = min(__a )
lowerCamelCase_ : List[Any] = feat_extract.num_mel_bins # hack!
lowerCamelCase_ : List[str] = feat_extract.pad(
__a , padding="""max_length""" , max_length=__a , truncation=__a , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def _lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
# fmt: off
lowerCamelCase_ : Optional[Any] = torch.tensor(
[2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03,
3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03,
2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04,
4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03,
7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04,
4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
# fmt: on
lowerCamelCase_ : Dict = self._load_datasamples(1 )
lowerCamelCase_ : Union[str, Any] = SpeechTaFeatureExtractor()
lowerCamelCase_ : str = feature_extractor(__a , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , __a , atol=1e-6 ) )
def _lowerCAmelCase ( self : Optional[int] ) ->int:
# fmt: off
lowerCamelCase_ : Optional[Any] = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
lowerCamelCase_ : Optional[int] = self._load_datasamples(1 )
lowerCamelCase_ : Any = SpeechTaFeatureExtractor()
lowerCamelCase_ : List[Any] = feature_extractor(audio_target=__a , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , __a , atol=1e-4 ) )
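# What the normalization checks above verify (sketch): after padding, the
# non-padded region of each waveform should be normalized to mean ≈ 0 and
# variance ≈ 1, while padded positions stay at padding_value — which is why
# the tests pair _check_zero_mean_unit_variance(input_values[0][:800]) with
# assertions like `input_values[0][800:].sum() < 1e-6`.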
| 171
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
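

# Illustrative sanity check (not from the original script; the shapes below are made up
# for the demo): a v2.0-style checkpoint stores rows grouped as
# [num_heads, num_splits, hidden], and the function above swaps that to
# [num_splits, num_heads, hidden] without changing the tensor size.
#
#   num_heads, num_splits, hidden = 4, 3, 8
#   param = torch.randn(num_heads * num_splits * hidden, 16)
#   fixed = fix_query_key_value_ordering(param, 2.0, num_splits, num_heads, hidden)
#   assert fixed.shape == param.shape  # only the row ordering changes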
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the matrix to tie with the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config,
    # see https://github.com/huggingface/transformers/issues/13906
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 171
| 1
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    # Note: the default of None here is an assumption so that info_command_factory's
    # zero-argument construction above works.
    def __init__(self, accelerate_config_file=None, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 297
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Tuple = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
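

# Background for the slicing above: PyTorch's nn.MultiheadAttention keeps the query,
# key and value projections stacked along dim 0 of a single in_proj tensor, so for
# DETR's hidden size of 256 the three consecutive 256-row blocks are q, k, v.
# Illustrative only (made-up tensor, not part of the conversion):
#
#   in_proj_weight = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]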
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
__lowerCamelCase : Dict = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 297
| 1
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 77
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
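
# With this pattern, importing the package stays cheap: `_LazyModule` only resolves the
# names listed in `_import_structure` on first attribute access, so e.g. touching
# `UniSpeechModel` is what actually triggers the torch-gated import above.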
| 77
| 1
|
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
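
# Example of how these flags are driven from the shell (standard pytest workflow,
# not defined in this file):
#
#   RUN_SLOW=yes python -m pytest tests/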
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    # Return the numeric id of the current pytest-xdist worker, e.g. "gw2" -> 2 (0 if not distributed).
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    uniq_id = re.sub(r"^gw", "", worker, 0, re.M)
    return int(uniq_id)


def get_torch_dist_unique_port():
    # Derive a unique torch.distributed port for each pytest-xdist worker.
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
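

# Worked example: worker "gw3" parses to 3, so it is assigned port 29500 + 3 = 29503,
# keeping torch.distributed rendezvous ports unique across parallel pytest-xdist workers.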
| 293
|
from __future__ import annotations
solution = []


def is_safe(board, row, column):
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
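
# For n = 8 this backtracking search prints all 92 solutions. Note that `solution`
# appends references to the single mutable `board`, so only len(solution) is meaningful;
# deep-copy the board before appending if the individual placements are needed later.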
| 73
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ :Optional[int] = logging.get_logger(__name__)
lowercase__ :List[Any] = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
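
# Minimal usage sketch (default values shown for illustration, not from the original file):
#
#   config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   assert config.num_layers == 4
#   assert config.hidden_size == 64 * 2 ** 3  # channel dim after the last stage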
| 706
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # internal nodes + leaves
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
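

# Implementation note: the tree lives in a flat array of size 2 * N with the children
# of node p at indices 2p and 2p + 1, so build() is O(N) while update() and query()
# run in O(log N) by walking the l/r cursors toward the root.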
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Test all possible segments against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
| 374
| 0
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
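
# Implementation note: `num_proc=args.num_workers` shards the map() across processes,
# and the ratio computed in tokenize() gives a quick characters-per-token estimate for
# judging how well the tokenizer fits the corpus.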
| 695
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695
| 1
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # 4d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 means y > d, and 4d > y
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f'''{solution() = }''')
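
# Derivation behind the solution (added sketch, checked against the code above): with the
# arithmetic progression x = y + d, z = y - d, the expression x^2 - y^2 - z^2 simplifies
# to y * (4d - y), so n = y * (4d - y). The loop enumerates y as `first_term` over the
# divisors of n and recovers 4d as `first_term + n / first_term`; positivity of z and of
# the second factor forces d < y < 4d, which is exactly the range check above.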
| 66
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    # The TF tokenizers here are used as pretrained tokenizers from existing model
    # checkpoints, so that's what these tests focus on.

    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
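# Minimal end-to-end sketch of what these tests exercise (assumes tensorflow-text
# is installed): tokenization runs inside the TF graph, so it can be serialized as
# part of the saved model itself.
def _in_graph_tokenization_sketch():
    tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
    return tf_tokenizer(tf.constant(["Hello TF!"]))["input_ids"]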
| 66
| 1
|
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """Return True if `number` ends in the same digits as its square (e.g. 76**2 == 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
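# Quick sanity checks: 5**2 = 25 and 76**2 = 5776 end in the original number,
# while 7**2 = 49 does not.
assert is_automorphic_number(5) and is_automorphic_number(76)
assert not is_automorphic_number(7)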
if __name__ == "__main__":
import doctest
doctest.testmod()
| 227
|
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
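def _fsdp_checkpoint_roundtrip_sketch(accelerator, model, optimizer, ckpt_dir="fsdp_ckpt"):
    # Hedged usage sketch (not part of the original module): round-trips an
    # FSDP-wrapped model and optimizer. Assumes a script launched via `accelerate`
    # with an FSDP plugin configured on `accelerator.state.fsdp_plugin`.
    fsdp_plugin = accelerator.state.fsdp_plugin
    save_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
    save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)
    load_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
    load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)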
| 227
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
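def _processor_usage_sketch(image_processor, tokenizer, image):
    # Hedged usage sketch (arguments are illustrative assumptions): a single call
    # yields pixel_values from the image processor and input_ids from the tokenizer.
    processor = InstructBlipProcessor(image_processor, tokenizer)
    return processor(images=image, text="What is shown in this image?", return_tensors="pt")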
| 720
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
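# Minimal sketch of what the export config above declares (the task name is an
# illustrative assumption): dynamic ONNX axes for each input tensor.
# RobertaOnnxConfig(RobertaConfig(), task="sequence-classification").inputs
# -> OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                 ("attention_mask", {0: "batch", 1: "sequence"})])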
| 232
| 0
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset as JSON (lines) batches to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
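def _to_json_sketch(path="out.jsonl"):
    # Hedged usage sketch: `Dataset.to_json` routes through JsonDatasetWriter above
    # and emits one JSON object per line by default (orient="records", lines=True).
    from datasets import Dataset

    return Dataset.from_dict({"a": [1, 2, 3]}).to_json(path)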
| 438
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 438
| 1
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
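# Hedged usage sketch: concrete test classes mix this in and provide the two
# attributes the helpers rely on (names below are hypothetical):
# class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#     feature_extraction_class = MyFeatureExtractor
#     feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}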
| 118
|
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function via numerical integration of its defining integral."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
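# Sanity check: the integral reproduces the factorial, gamma(5) = 4! = 24.
assert abs(gamma(5) - 24.0) < 1e-4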
if __name__ == "__main__":
from doctest import testmod
testmod()
| 118
| 1
|
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus

sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
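# Hedged CLI sketch (script name and paths are illustrative):
# python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#     --pytorch_dump_folder_path ./transfo-xl-pt \
#     --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#     --transfo_xl_config_file ./tf_ckpt/config.json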
if __name__ == "__main__":
_snake_case : int = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
_snake_case : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 22
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
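# Hedged sketch of the scoring arithmetic above: mean per-token cross-entropy times
# sequence length is the total negative log-likelihood, so mtf_score is the sequence
# log-likelihood (higher is better).
import numpy as np

seq_len, vocab = 4, 8
per_token_nll = np.full(seq_len, np.log(vocab))  # a uniform model pays log(vocab) per token
score = -(seq_len * per_token_nll.mean())  # = -4 * log(8) ~= -8.32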
| 22
| 1
|
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
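# Hedged CLI sketch (script name and paths are illustrative):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#     --tapas_config_file ./tapas_wtq/bert_config.json \
#     --pytorch_dump_path ./tapas-wtq-pt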
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 705
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1_024, tag_pad_id=216, subs_pad_id=1_001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
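# Minimal sketch: the xpath-specific fields above size the HTML tree embeddings;
# a tiny configuration for unit tests could look like this (values illustrative):
tiny_markuplm_config = MarkupLMConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128, max_depth=10
)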
| 347
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
snake_case , snake_case = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case )
snake_case = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case = jax.random.PRNGKey(0 )
snake_case = 4
snake_case = jax.device_count()
snake_case = num_samples * [prompt]
snake_case = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
snake_case = replicate(__snake_case )
snake_case = jax.random.split(__snake_case , __snake_case )
snake_case = shard(__snake_case )
snake_case = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(__snake_case , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
snake_case = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__snake_case ) == num_samples
def a_ ( self ):
snake_case , snake_case = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__snake_case )
snake_case = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case = jax.random.PRNGKey(0 )
snake_case = 5_0
snake_case = jax.device_count()
snake_case = num_samples * [prompt]
snake_case = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
snake_case = replicate(__snake_case )
snake_case = jax.random.split(__snake_case , __snake_case )
snake_case = shard(__snake_case )
snake_case = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def a_ ( self ):
snake_case , snake_case = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case )
snake_case = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case = jax.random.PRNGKey(0 )
snake_case = 5_0
snake_case = jax.device_count()
snake_case = num_samples * [prompt]
snake_case = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
snake_case = replicate(__snake_case )
snake_case = jax.random.split(__snake_case , __snake_case )
snake_case = shard(__snake_case )
snake_case = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def a_ ( self ):
snake_case , snake_case = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
snake_case = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case = jax.random.PRNGKey(0 )
snake_case = 5_0
snake_case = jax.device_count()
snake_case = num_samples * [prompt]
snake_case = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
snake_case = replicate(__snake_case )
snake_case = jax.random.split(__snake_case , __snake_case )
snake_case = shard(__snake_case )
snake_case = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def a_ ( self ):
snake_case = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__snake_case , steps_offset=1 , )
snake_case , snake_case = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__snake_case , safety_checker=__snake_case , )
snake_case = scheduler.create_state()
snake_case = scheduler_state
snake_case = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case = jax.random.PRNGKey(0 )
snake_case = 5_0
snake_case = jax.device_count()
snake_case = num_samples * [prompt]
snake_case = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
snake_case = replicate(__snake_case )
snake_case = jax.random.split(__snake_case , __snake_case )
snake_case = shard(__snake_case )
snake_case = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def a_ ( self ):
snake_case = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case = jax.device_count()
snake_case = num_samples * [prompt]
snake_case = jax.random.split(jax.random.PRNGKey(0 ) , __snake_case )
snake_case , snake_case = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , )
snake_case = replicate(__snake_case )
snake_case = pipeline.prepare_inputs(__snake_case )
snake_case = shard(__snake_case )
snake_case = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
snake_case = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
snake_case , snake_case = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , use_memory_efficient_attention=__snake_case , )
snake_case = replicate(__snake_case )
snake_case = pipeline.prepare_inputs(__snake_case )
snake_case = shard(__snake_case )
snake_case = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
snake_case = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
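# Hedged sketch of the pmap data layout used throughout these tests: `replicate`
# copies the params pytree onto every device, `shard` adds a leading device axis
# to the batch, and `jit=True` runs the pipeline under `pmap`.
# params = replicate(params)
# prompt_ids = shard(pipeline.prepare_inputs(num_samples * [prompt]))
# images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images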
| 550
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 550
| 1
|
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
A_ : Union[str, Any] = () if is_tf_available() else ()
A_ : Optional[int] = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
A_ : Dict = False
A_ : int = False
A_ : Any = False
A_ : str = False
def _lowerCamelCase ( self : Dict , __snake_case : str , __snake_case : Dict , __snake_case : int , __snake_case : str , __snake_case : int ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = TFTransfoXLModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=__snake_case , d_embed=37 )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
self.model_tester.set_seed()
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
self.model_tester.set_seed()
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__snake_case )
    def test_model_common_attributes( self : Optional[int] ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained( self : List[Any] ):
        '''simple docstring'''
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest( unittest.TestCase ):
    '''simple docstring'''
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
    def test_lm_generate_transfo_xl_wt103( self : Any ):
        '''simple docstring'''
        model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.int32 )  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
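# --- Illustrative usage sketch (not part of the test suite) ---
# A minimal example of how Transformer-XL threads its `mems` cache across
# consecutive segments so attention can reach beyond the current segment;
# the checkpoint name and the toy input ids are assumptions.
if __name__ == "__main__":
    model = TFTransfoXLModel.from_pretrained("transfo-xl-wt103")
    segment_a = tf.convert_to_tensor([[10, 20, 30]], dtype=tf.int32)
    segment_b = tf.convert_to_tensor([[40, 50, 60]], dtype=tf.int32)
    outputs_a = model(segment_a)  # first segment: mems are initialized internally
    outputs_b = model(segment_b, mems=outputs_a.mems)  # second segment reuses the cache
    print([tuple(m.shape) for m in outputs_b.mems])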
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__( self : Union[str, Any] , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : List[str] , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ):
        '''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''image'''] = images
        return encoded_inputs
    def get_overflowing_images( self : Union[str, Any] , images , overflow_to_sample_mapping ):
        '''simple docstring'''
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
    def batch_decode( self : List[Any] , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : str , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self : Optional[Any] ):
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self : List[str] ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self : Dict ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
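# --- Illustrative usage sketch (not part of the library code) ---
# Running the processor end to end with OCR enabled; the checkpoint name and
# the local image path are assumptions.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutXLMProcessor
    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")
    # apply_ocr defaults to True, so words and boxes are produced by the image processor
    encoding = processor(image, return_tensors="pt")
    print(list(encoding.keys()))  # e.g. ['input_ids', 'bbox', 'attention_mask', 'image', ...]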
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator( length = 8 ):
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator( chars_incl , i ):
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random( chars_incl , i ):
    return "".join(secrets.choice(chars_incl ) for _ in range(i ) )
def random_number( chars_incl , i ):
    pass  # Put your code here...
def random_letters( chars_incl , i ):
    pass  # Put your code here...
def random_characters( chars_incl , i ):
    pass  # Put your code here...
def is_strong_password( password , min_length = 8 ):
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    max_length = int(input('Please indicate the max length of your password: ' ).strip() )
    chars_incl = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(max_length ) )
    print(
        'Alternative Password generated:' , alternative_password_generator(chars_incl , max_length ) , )
    print('[If you are thinking of using this password, You better save it.]' )
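# A small non-interactive demo of the helpers above; the sample inputs are arbitrary.
def demo():
    print("Random password:", password_generator(12))
    print("Password containing 'ab':", alternative_password_generator("ab", 10))
    print("Is 'Aa1!aaaa' strong?", is_strong_password("Aa1!aaaa"))  # True
    print("Is 'weakpass' strong?", is_strong_password("weakpass"))  # False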
if __name__ == "__main__":
main()
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig( PretrainedConfig ):
    model_type = '''instructblip_vision_model'''
    def __init__( self , hidden_size=1_408 , intermediate_size=6_144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig( PretrainedConfig ):
    model_type = '''instructblip_qformer'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_408 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig( PretrainedConfig ):
    model_type = '''instructblip'''
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config : InstructBlipVisionConfig , qformer_config : InstructBlipQFormerConfig , text_config : PretrainedConfig , **kwargs , ):
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
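# --- Illustrative usage sketch (not part of the library code) ---
# Composing a full InstructBlipConfig from default sub-configs:
if __name__ == "__main__":
    vision = InstructBlipVisionConfig()
    qformer = InstructBlipQFormerConfig()
    text = CONFIG_MAPPING["opt"]()
    combined = InstructBlipConfig.from_vision_qformer_text_configs(vision , qformer , text )
    print(combined.model_type , combined.num_query_tokens )  # instructblip 32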
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab( vocab_file : str ) -> Optional[Any]:
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer( object ):
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    add_prefix_space = False
    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ["jieba"] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]
    @property
    def newline_id( self ):
        return self.encoder["\n"]
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self , token ):
        return token in self.encoder
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        return "".join(tokens )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : List[int] = None ) -> List[int]:
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b ))
        return [1] + ([0] * len(token_ids_a ))
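# --- Illustrative usage sketch (not part of the library code) ---
# The greedy longest-match-first behaviour of WordpieceTokenizer above,
# shown with a toy vocabulary (an assumption, not a real CPM-Ant vocab):
if __name__ == "__main__":
    toy_vocab = {"un": 0, "believ": 1, "able": 2, "<unk>": 3}
    wp = WordpieceTokenizer(vocab=toy_vocab , unk_token="<unk>" )
    print(wp.tokenize("unbelievable" ) )  # ['un', 'believ', 'able']
    print(wp.tokenize("xyz" ) )  # ['<unk>', '<unk>', '<unk>'] - one per unmatched char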
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = """facebook/bart-large-mnli"""
    description = (
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    name = """text_classifier"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["""text""", ["""text"""]]
    outputs = ["""text"""]
    def setup( self ):
        '''simple docstring'''
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
    def encode( self , text , labels ):
        '''simple docstring'''
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
    def decode( self , outputs ):
        '''simple docstring'''
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
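# --- Illustrative usage sketch (not part of the library code) ---
# Calling the tool directly; the input text and candidate labels are assumptions.
if __name__ == "__main__":
    classifier = TextClassificationTool()
    result = classifier("This is a super nice API!" , labels=["positive", "negative"] )
    print(result )  # most likely label, e.g. 'positive'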
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__( self , row , col , graph ) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i , j , visited ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
    def diffs( self , i , j , visited ) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
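# A small usage sketch of the Graph class above; the sample grid is arbitrary.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    g = Graph(len(grid ) , len(grid[0] ) , grid )
    print(g.count_islands() )  # 5 islands under 8-directional connectivity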
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests( unittest.TestCase ):
    """simple docstring"""
@property
    def dummy_uncond_unet( self ) -> Optional[Any]:
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    def test_inference( self ) -> Optional[Any]:
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=generator , return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    def test_inference( self ) -> Any:
        model_id = 'google/ncsnpp-church-256'
        model = UNet2DModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ) -> str:
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return value
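# A small usage sketch of the helpers above; the environment variable names
# and values are assumptions.
if __name__ == "__main__":
    os.environ["MY_FLAG"] = "1"
    os.environ["MY_CHOICE"] = "fp16"
    print(get_int_from_env(["UNSET_A", "UNSET_B"] , 4 ) )  # 4 (falls back to the default)
    print(parse_flag_from_env("MY_FLAG" ) )  # True
    print(parse_choice_from_env("MY_CHOICE" ) )  # 'fp16'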
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct , x , y , z = symbols("""ct x y z""")
def beta(velocity ):
    """simple docstring"""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!" )
    return velocity / c
def gamma(velocity ):
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix(velocity ):
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity , event = None ):
    """simple docstring"""
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("""Example of four vector: """)
print(F"ct\' = {four_vector[0]}")
print(F"x\' = {four_vector[1]}")
print(F"y\' = {four_vector[2]}")
print(F"z\' = {four_vector[3]}")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo , pytorch_dump_folder_path ):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='pytorch_model.bin' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
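# Example invocation (the script filename and output path are assumptions):
# python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta-prelayernorm-converted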
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree( node ) -> bool:
    # Validation
    def is_valid_tree( node ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(node ):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.' )
    def is_binary_search_tree_recursive_check(
        node , left_bound , right_bound ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(node , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
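    # A small usage sketch of the validator above; the trees are arbitrary examples.
    valid = TreeNode(2.0 , TreeNode(1.0 ) , TreeNode(3.0 ) )
    invalid = TreeNode(2.0 , TreeNode(3.0 ) , TreeNode(1.0 ) )  # children swapped
    assert is_binary_search_tree(valid )
    assert not is_binary_search_tree(invalid )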
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ) -> List[str]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self ) -> List[Any]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self ) -> Optional[int]:
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config(self ) -> Optional[Any]:
        """simple docstring"""
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels ) -> Union[str, Any]:
        """simple docstring"""
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self ) -> int:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self ) -> List[str]:
        """simple docstring"""
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ) -> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ) -> List[str]:
        """simple docstring"""
        return
    def test_forward_signature(self ) -> Dict:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_for_semantic_segmentation(self ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def a__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def a__ (self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def a__ (self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def a__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__ (self ) -> Any:
"""simple docstring"""
pass
    def test_hidden_states_output(self ) -> str:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization(self ) -> str:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
    @slow
    def test_model_from_pretrained(self ) -> str:
        """simple docstring"""
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''')
    image = Image.open(filepath ).convert('''RGB''')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    def test_inference_swin_backbone(self ) -> List[str]:
        """simple docstring"""
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) )
    def test_inference_convnext_backbone(self ) -> List[str]:
        """simple docstring"""
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) )
import os
def __magic_name__ ( lowerCAmelCase_ = "input.txt"):
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowerCAmelCase_) , lowerCAmelCase_)) as input_file:
lowerCamelCase_ : Dict = [
[int(lowerCAmelCase_) for element in line.split(",")]
for line in input_file.readlines()
]
lowerCamelCase_ : str = len(lowerCAmelCase_)
lowerCamelCase_ : Any = len(matrix[0])
lowerCamelCase_ : Optional[Any] = [[-1 for _ in range(lowerCAmelCase_)] for _ in range(lowerCAmelCase_)]
for i in range(lowerCAmelCase_):
lowerCamelCase_ : Union[str, Any] = matrix[i][0]
for j in range(1 , lowerCAmelCase_):
for i in range(lowerCAmelCase_):
lowerCamelCase_ : List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase_):
lowerCamelCase_ : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j])
for i in range(rows - 2 , -1 , -1):
lowerCamelCase_ : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j])
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'''{solution() = }''')
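# A small self-contained check of the same three-way DP, using the example
# matrix from Project Euler problem 82 (its minimal left-to-right path sum is 994).
def demo() -> None:
    import tempfile
    rows = [
        "131,673,234,103,18",
        "201,96,342,965,150",
        "630,803,746,422,111",
        "537,699,497,121,956",
        "805,732,524,37,331",
    ]
    with tempfile.NamedTemporaryFile(
        "w", suffix=".txt", delete=False, dir=os.path.dirname(__file__) or "."
    ) as data_file:
        data_file.write("\n".join(rows))
        name = os.path.basename(data_file.name)
    assert solution(name) == 994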
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ):
    """simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'a': 0.0_84_97,
'b': 0.0_14_92,
'c': 0.0_22_02,
'd': 0.0_42_53,
'e': 0.1_11_62,
'f': 0.0_22_28,
'g': 0.0_20_15,
'h': 0.0_60_94,
'i': 0.0_75_46,
'j': 0.0_01_53,
'k': 0.0_12_92,
'l': 0.0_40_25,
'm': 0.0_24_06,
'n': 0.0_67_49,
'o': 0.0_75_07,
'p': 0.0_19_29,
'q': 0.0_00_95,
'r': 0.0_75_87,
's': 0.0_63_27,
't': 0.0_93_56,
'u': 0.0_27_58,
'v': 0.0_09_78,
'w': 0.0_25_60,
'x': 0.0_01_50,
'y': 0.0_19_94,
'z': 0.0_00_77,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
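# A small usage sketch of the decrypter above; the ciphertext is an English
# sentence Caesar-shifted by 7 (an illustrative example).
if __name__ == "__main__":
    shift, chi_value, decoded = decrypt_caesar_with_chi_squared(
        "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
    )
    print(shift, decoded)  # expected: 7, 'why is the caesar cipher so popular? it is too easy to crack!'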
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self : List[Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
    def test_stable_diffusion_inpaint_pipeline( self : List[str]):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline , params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None)
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids , processed_masked_images , processed_masks = pipeline.prepare_inputs(prompt , init_image , mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed , jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids , processed_masked_images , processed_masks , params , prng_seed , num_inference_steps , jit=True)
        images = output.images.reshape(num_samples , 512 , 512 , 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a : Union[str, Any] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ['''MobileViTFeatureExtractor''']
a : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
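# Editorial note: with the _LazyModule indirection above, importing the package
# stays cheap; e.g. `from transformers import MobileViTModel` resolves the
# torch-backed module only on first attribute access, so a missing backend
# fails late rather than at import time.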
| 555
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
def __init__( self : Optional[Any] , __UpperCamelCase : float , __UpperCamelCase : int ) ->Dict:
'''simple docstring'''
if k in (0.0_4, 0.0_6):
_UpperCAmelCase = k
_UpperCAmelCase = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Tuple ) ->str:
'''simple docstring'''
return str(self.k )
def _snake_case ( self : str , __UpperCamelCase : str ) ->tuple[cva.Mat, list[list[int]]]:
'''simple docstring'''
_UpperCAmelCase = cva.imread(__UpperCamelCase , 0 )
_UpperCAmelCase ,_UpperCAmelCase = img.shape
_UpperCAmelCase = []
_UpperCAmelCase = img.copy()
_UpperCAmelCase = cva.cvtColor(__UpperCamelCase , cva.COLOR_GRAY2RGB )
_UpperCAmelCase ,_UpperCAmelCase = np.gradient(__UpperCamelCase )
_UpperCAmelCase = dx**2
_UpperCAmelCase = dy**2
_UpperCAmelCase = dx * dy
        k = self.k  # use the k validated in the constructor, not a hard-coded 0.04
_UpperCAmelCase = self.window_size // 2
for y in range(__UpperCamelCase , h - offset ):
for x in range(__UpperCamelCase , w - offset ):
_UpperCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_UpperCAmelCase = (wxx * wyy) - (wxy**2)
_UpperCAmelCase = wxx + wyy
_UpperCAmelCase = det - k * (trace**2)
                # Corner-response threshold; tune this value per image
                if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
a : List[Any] = HarrisCorner(0.04, 3)
a , a : List[Any] = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 555
| 1
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False )-> Union[str, Any]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase = os.path.abspath(UpperCAmelCase_ )
logger.info(F"Loading PyTorch weights from {pt_path}" )
UpperCamelCase = torch.load(UpperCAmelCase_ , map_location="cpu" )
logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
UpperCamelCase = convert_pytorch_state_dict_to_flax(UpperCAmelCase_ , UpperCAmelCase_ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase = convert_pytorch_sharded_state_dict_to_flax(UpperCAmelCase_ , UpperCAmelCase_ )
return flax_state_dict
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )-> (Tuple[str], np.ndarray):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(UpperCAmelCase_ ) -> bool:
return len(set(UpperCAmelCase_ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(UpperCAmelCase_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(UpperCAmelCase_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(UpperCAmelCase_ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(UpperCAmelCase_ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(UpperCAmelCase_ ):
UpperCamelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(UpperCAmelCase_ ):
UpperCamelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
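# Worked example (editorial): a 2-D PyTorch Linear weight of shape (out, in),
# say ("proj", "weight"), is renamed to ("proj", "kernel") and transposed to
# Flax's (in, out) layout; a 4-D Conv2d weight (O, I, kH, kW) gets the same
# rename but is permuted with transpose(2, 3, 1, 0) to Flax's (kH, kW, I, O).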
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> Union[str, Any]:
"""simple docstring"""
# convert pytorch tensor to numpy
UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase = flax_model.params["params"]
else:
UpperCamelCase = flax_model.params
UpperCamelCase = flatten_dict(UpperCAmelCase_ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(UpperCAmelCase_ )
UpperCamelCase = {}
UpperCamelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase , UpperCamelCase = rename_key_and_reshape_tensor(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# add model prefix if necessary
UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase = jnp.asarray(UpperCAmelCase_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase = jnp.asarray(UpperCAmelCase_ )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase = jnp.asarray(UpperCAmelCase_ )
return unflatten_dict(UpperCAmelCase_ )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
import torch
# Load the index
UpperCamelCase = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase = torch.load(UpperCAmelCase_ )
UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase = flax_model.params["params"]
UpperCamelCase = flatten_dict(UpperCAmelCase_ )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase = flax_model.params
UpperCamelCase = flatten_dict(UpperCAmelCase_ )
UpperCamelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase , UpperCamelCase = rename_key_and_reshape_tensor(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# add model prefix if necessary
UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase = jnp.asarray(UpperCAmelCase_ )
continue
if "var" in flax_key[-1]:
UpperCamelCase = jnp.asarray(UpperCAmelCase_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase = jnp.asarray(UpperCAmelCase_ )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase = jnp.asarray(UpperCAmelCase_ )
return unflatten_dict(UpperCAmelCase_ )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> int:
"""simple docstring"""
UpperCamelCase = os.path.abspath(UpperCAmelCase_ )
logger.info(F"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
UpperCamelCase = getattr(UpperCAmelCase_ , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(UpperCAmelCase_ , "rb" ) as state_f:
try:
UpperCamelCase = from_bytes(UpperCAmelCase_ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> Any:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase = flatten_dict(jax.tree_util.tree_map(lambda UpperCAmelCase_ : x.dtype == jnp.bfloataa , UpperCAmelCase_ ) ).values()
if any(UpperCAmelCase_ ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot
        # handle bf16 and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase = jax.tree_util.tree_map(
lambda UpperCAmelCase_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCAmelCase_ )
UpperCamelCase = flatten_dict(UpperCAmelCase_ )
UpperCamelCase = pt_model.state_dict()
UpperCamelCase = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase = []
UpperCamelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(UpperCAmelCase_ ) not in pt_model_dict:
# conv layer
UpperCamelCase = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase = jnp.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCAmelCase_ ) not in pt_model_dict:
# linear layer
UpperCamelCase = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase = ".".join(UpperCAmelCase_ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase = key.split("." )
UpperCamelCase = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase = key_components[-2] + "_v"
if name is not None:
UpperCamelCase = key_components[:-3] + [name]
UpperCamelCase = ".".join(UpperCAmelCase_ )
UpperCamelCase = key
if flax_key in special_pt_names:
UpperCamelCase = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
UpperCamelCase = np.asarray(UpperCAmelCase_ ) if not isinstance(UpperCAmelCase_ , np.ndarray ) else flax_tensor
UpperCamelCase = torch.from_numpy(UpperCAmelCase_ )
# remove from missing keys
missing_keys.remove(UpperCAmelCase_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(UpperCAmelCase_ )
pt_model.load_state_dict(UpperCAmelCase_ )
# re-transform missing_keys to list
UpperCamelCase = list(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(UpperCAmelCase_ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
" use it for predictions and inference." )
else:
logger.warning(
F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"If your task is similar to the task the model of the checkpoint was trained on, "
F"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
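# Usage sketch (editorial; the checkpoint name is a placeholder): the helpers
# above power cross-framework loading, e.g.
#   from transformers import BertModel
#   pt_model = BertModel.from_pretrained("some/flax-only-checkpoint", from_flax=True)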
| 556
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = 3
class __a ( _lowerCAmelCase ):
pass
def lowerCamelCase__ ( UpperCAmelCase_ )-> List[str]:
"""simple docstring"""
for shard in shards:
for i in range(UpperCAmelCase_ ):
yield {"i": i, "shard": shard}
def lowerCamelCase__ ( )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = int(os.environ["RANK"] )
UpperCamelCase = int(os.environ["WORLD_SIZE"] )
UpperCamelCase = ArgumentParser()
parser.add_argument("--streaming" , type=UpperCAmelCase_ )
parser.add_argument("--local_rank" , type=UpperCAmelCase_ )
parser.add_argument("--num_workers" , type=UpperCAmelCase_ , default=0 )
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.streaming
UpperCamelCase = args.num_workers
UpperCamelCase = {"shards": [F"shard_{shard_idx}" for shard_idx in range(UpperCAmelCase_ )]}
UpperCamelCase = IterableDataset.from_generator(UpperCAmelCase_ , gen_kwargs=UpperCAmelCase_ )
if not streaming:
UpperCamelCase = Dataset.from_list(list(UpperCAmelCase_ ) )
UpperCamelCase = split_dataset_by_node(UpperCAmelCase_ , rank=UpperCAmelCase_ , world_size=UpperCAmelCase_ )
UpperCamelCase = torch.utils.data.DataLoader(UpperCAmelCase_ , num_workers=UpperCAmelCase_ )
UpperCamelCase = NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCamelCase = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
UpperCamelCase = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
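# Contract sketch (editorial): split_dataset_by_node mirrors the arithmetic the
# script verifies, e.g. for a 10-example map-style dataset and world_size=3:
#   split_dataset_by_node(ds, rank=0, world_size=3)  # 4 examples (10 // 3, plus 1 extra)
#   split_dataset_by_node(ds, rank=2, world_size=3)  # 3 examples
# For streaming datasets, whole shards are distributed across ranks when the
# shard count divides evenly.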
| 556
| 1
|
import string
import numpy
def lowerCamelCase__ ( a : int , a : int ) -> int:
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , a )
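# e.g. greatest_common_divisor(7, 36) == 1 (a key with determinant 7 is usable
# with the 36-character alphabet below), while greatest_common_divisor(4, 36) == 4.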
class lowerCAmelCase_ :
lowerCamelCase_ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    lowerCamelCase_ = numpy.vectorize(lambda x: x % 36)
    lowerCamelCase_ = numpy.vectorize(round)
def __init__( self : List[Any] , __A : numpy.ndarray ) ->None:
"""simple docstring"""
a__ :Tuple = self.modulus(__A ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
a__ :Dict = encrypt_key.shape[0]
def _snake_case ( self : Union[str, Any] , __A : str ) ->int:
"""simple docstring"""
return self.key_string.index(__A )
def _snake_case ( self : Optional[int] , __A : int ) ->str:
"""simple docstring"""
return self.key_string[round(__A )]
def _snake_case ( self : Any ) ->None:
"""simple docstring"""
a__ :Any = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
a__ :Optional[Any] = det % len(self.key_string )
a__ :List[str] = len(self.key_string )
if greatest_common_divisor(__A , len(self.key_string ) ) != 1:
            a__ :List[Any] = (
                F'''determinant modulo {req_l} of the encryption key ({det}) '''
                F'''is not coprime with {req_l}.\nTry another key.'''
            )
raise ValueError(__A )
def _snake_case ( self : Tuple , __A : str ) ->str:
"""simple docstring"""
a__ :Optional[Any] = [char for char in text.upper() if char in self.key_string]
a__ :Optional[int] = chars[-1]
while len(__A ) % self.break_key != 0:
chars.append(__A )
return "".join(__A )
def _snake_case ( self : List[str] , __A : str ) ->str:
"""simple docstring"""
a__ :Any = self.process_text(text.upper() )
a__ :Any = ""
for i in range(0 , len(__A ) - self.break_key + 1 , self.break_key ):
a__ :Optional[Any] = text[i : i + self.break_key]
a__ :Union[str, Any] = [self.replace_letters(__A ) for char in batch]
a__ :Any = numpy.array([vec] ).T
a__ :Union[str, Any] = self.modulus(self.encrypt_key.dot(__A ) ).T.tolist()[
0
]
a__ :Tuple = "".join(
self.replace_digits(__A ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def _snake_case ( self : Optional[Any] ) ->numpy.ndarray:
"""simple docstring"""
a__ :int = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
a__ :List[str] = det % len(self.key_string )
a__ :Union[str, Any] = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
a__ :Union[str, Any] = i
break
a__ :str = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(__A ) )
def _snake_case ( self : Tuple , __A : str ) ->str:
"""simple docstring"""
a__ :Union[str, Any] = self.make_decrypt_key()
a__ :Optional[int] = self.process_text(text.upper() )
a__ :str = ""
for i in range(0 , len(__A ) - self.break_key + 1 , self.break_key ):
a__ :Optional[Any] = text[i : i + self.break_key]
a__ :int = [self.replace_letters(__A ) for char in batch]
a__ :str = numpy.array([vec] ).T
a__ :Tuple = self.modulus(decrypt_key.dot(__A ) ).T.tolist()[0]
a__ :Union[str, Any] = "".join(
self.replace_digits(__A ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
a__ :List[Any] = int(input("Enter the order of the encryption key: " ) )
a__ :str = []
print("Enter each row of the encryption key with space separated integers" )
for _ in range(a ):
a__ :int = [int(a ) for x in input().split()]
hill_matrix.append(a )
a__ :Any = HillCipher(numpy.array(a ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
a__ :Tuple = input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
a__ :Union[str, Any] = input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(a ) )
elif option == "2":
a__ :List[str] = input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
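# Programmatic usage sketch (editorial; the key is chosen so its determinant,
# 7, is coprime with 36):
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   token = hc.encrypt("HELLO")   # input is padded to a multiple of break_key
#   hc.decrypt(token)             # -> "HELLOO" (padding character retained)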
| 395
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
snake_case__ = logging.get_logger(__name__)
class lowerCAmelCase_ ( _a):
def __init__( self : str , *__A : Optional[int] , **__A : int ) ->None:
"""simple docstring"""
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , __A , )
super().__init__(*__A , **__A )
| 395
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : List[str] = """dpr"""
def __init__( self, snake_case__=3_05_22, snake_case__=7_68, snake_case__=12, snake_case__=12, snake_case__=30_72, snake_case__="gelu", snake_case__=0.1, snake_case__=0.1, snake_case__=5_12, snake_case__=2, snake_case__=0.02, snake_case__=1E-12, snake_case__=0, snake_case__="absolute", snake_case__ = 0, **snake_case__, ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=snake_case__, **snake_case__ )
lowercase_ : Tuple = vocab_size
lowercase_ : Dict = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : Any = hidden_act
lowercase_ : str = intermediate_size
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : List[Any] = max_position_embeddings
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : Any = layer_norm_eps
lowercase_ : Dict = projection_dim
lowercase_ : List[str] = position_embedding_type
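# Editorial sketch: in the released library this class is transformers.DPRConfig;
# e.g. DPRConfig(projection_dim=128) adds a 128-d projection on top of the
# encoder hidden states, while the default projection_dim=0 keeps the raw
# hidden size.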
| 436
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : List[str] = """bridgetower_vision_model"""
def __init__( self, snake_case__=7_68, snake_case__=12, snake_case__=3, snake_case__=16, snake_case__=2_88, snake_case__=1, snake_case__=1E-05, snake_case__=False, snake_case__=True, snake_case__=False, **snake_case__, ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**snake_case__ )
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[int] = image_size
lowercase_ : Dict = initializer_factor
lowercase_ : Dict = layer_norm_eps
lowercase_ : Any = stop_gradient
lowercase_ : Union[str, Any] = share_layernorm
lowercase_ : Tuple = remove_last_layer
@classmethod
def snake_case__ ( cls, snake_case__, **snake_case__ ) -> "PretrainedConfig":
"""simple docstring"""
lowercase_ , lowercase_ : str = cls.get_config_dict(snake_case__, **snake_case__ )
if config_dict.get("""model_type""" ) == "bridgetower":
            lowercase_ : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__, **snake_case__ )
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : List[str] = """bridgetower_text_model"""
def __init__( self, snake_case__=5_02_65, snake_case__=7_68, snake_case__=12, snake_case__=12, snake_case__=1, snake_case__=30_72, snake_case__="gelu", snake_case__=0.1, snake_case__=0.1, snake_case__=5_14, snake_case__=1, snake_case__=1E-05, snake_case__=1, snake_case__=0, snake_case__=2, snake_case__="absolute", snake_case__=True, **snake_case__, ) -> Tuple:
"""simple docstring"""
super().__init__(**snake_case__ )
lowercase_ : Dict = vocab_size
lowercase_ : int = hidden_size
lowercase_ : Tuple = num_hidden_layers
lowercase_ : Optional[Any] = num_attention_heads
lowercase_ : List[str] = hidden_act
lowercase_ : str = initializer_factor
lowercase_ : Dict = intermediate_size
lowercase_ : int = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : int = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : str = position_embedding_type
lowercase_ : Optional[int] = use_cache
lowercase_ : List[str] = pad_token_id
lowercase_ : str = bos_token_id
lowercase_ : str = eos_token_id
@classmethod
def snake_case__ ( cls, snake_case__, **snake_case__ ) -> "PretrainedConfig":
"""simple docstring"""
lowercase_ , lowercase_ : str = cls.get_config_dict(snake_case__, **snake_case__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowercase_ : Dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__, **snake_case__ )
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : Tuple = """bridgetower"""
def __init__( self, snake_case__=True, snake_case__="gelu", snake_case__=7_68, snake_case__=1, snake_case__=1E-05, snake_case__=False, snake_case__="add", snake_case__=12, snake_case__=6, snake_case__=False, snake_case__=False, snake_case__=None, snake_case__=None, **snake_case__, ) -> Tuple:
"""simple docstring"""
# TODO: remove this once the Hub files are updated.
lowercase_ : Optional[int] = kwargs.pop("""text_config_dict""", snake_case__ )
lowercase_ : Union[str, Any] = kwargs.pop("""vision_config_dict""", snake_case__ )
super().__init__(**snake_case__ )
lowercase_ : Union[str, Any] = share_cross_modal_transformer_layers
lowercase_ : List[str] = hidden_act
lowercase_ : Dict = hidden_size
lowercase_ : List[str] = initializer_factor
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Tuple = share_link_tower_layers
lowercase_ : Tuple = link_tower_type
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : Union[str, Any] = tie_word_embeddings
lowercase_ : int = init_layernorm_from_vision_encoder
if text_config is None:
lowercase_ : Optional[int] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowercase_ : List[str] = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowercase_ : int = BridgeTowerTextConfig(**snake_case__ )
lowercase_ : List[Any] = BridgeTowerVisionConfig(**snake_case__ )
@classmethod
def snake_case__ ( cls, snake_case__, snake_case__, **snake_case__ ) -> List[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **snake_case__ )
def snake_case__ ( self ) -> str:
"""simple docstring"""
lowercase_ : Tuple = copy.deepcopy(self.__dict__ )
lowercase_ : str = self.text_config.to_dict()
lowercase_ : Dict = self.vision_config.to_dict()
lowercase_ : List[Any] = self.__class__.model_type
return output
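# Editorial sketch (follows the released transformers API that this dump mirrors):
#   text_cfg = BridgeTowerTextConfig()
#   vision_cfg = BridgeTowerVisionConfig()
#   cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
# The classmethod above is that constructor; to_dict() round-trips both
# sub-configs for serialization.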
| 436
| 1
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Optional[Any]):
torch.manual_seed(0)
lowerCAmelCase_ : Any = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : List[Any] = self.dummy_uncond_unet
lowerCAmelCase_ : int = ScoreSdeVeScheduler()
lowerCAmelCase_ : Optional[Any] = ScoreSdeVePipeline(unet=A_ , scheduler=A_)
sde_ve.to(A_)
sde_ve.set_progress_bar_config(disable=A_)
lowerCAmelCase_ : int = torch.manual_seed(0)
lowerCAmelCase_ : str = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=A_).images
lowerCAmelCase_ : Tuple = torch.manual_seed(0)
lowerCAmelCase_ : List[Any] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=A_ , return_dict=A_)[
0
]
lowerCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : List[Any] = '''google/ncsnpp-church-256'''
lowerCAmelCase_ : int = UNetaDModel.from_pretrained(A_)
lowerCAmelCase_ : Union[str, Any] = ScoreSdeVeScheduler.from_pretrained(A_)
lowerCAmelCase_ : str = ScoreSdeVePipeline(unet=A_ , scheduler=A_)
sde_ve.to(A_)
sde_ve.set_progress_bar_config(disable=A_)
lowerCAmelCase_ : str = torch.manual_seed(0)
lowerCAmelCase_ : str = sde_ve(num_inference_steps=1_0 , output_type='''numpy''' , generator=A_).images
lowerCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
lowerCAmelCase_ : int = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 171
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Any=None ,__UpperCamelCase : List[Any]=None ):
if attention_mask is None:
lowerCAmelCase_ : Dict = tf.cast(tf.math.not_equal(__UpperCamelCase ,config.pad_token_id ) ,tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __snake_case :
_a = OPTConfig
_a = {}
_a = '''gelu'''
def __init__( self : List[Any] , A_ : List[str] , A_ : str=1_3 , A_ : int=7 , A_ : Tuple=True , A_ : int=False , A_ : Union[str, Any]=9_9 , A_ : str=1_6 , A_ : Tuple=2 , A_ : List[Any]=4 , A_ : Optional[int]=4 , A_ : Any="gelu" , A_ : Optional[int]=0.1 , A_ : Union[str, Any]=0.1 , A_ : str=2_0 , A_ : Dict=2 , A_ : int=1 , A_ : List[str]=0 , A_ : Optional[Any]=1_6 , A_ : str=1_6 , ):
lowerCAmelCase_ : Optional[Any] = parent
lowerCAmelCase_ : Any = batch_size
lowerCAmelCase_ : Tuple = seq_length
lowerCAmelCase_ : Tuple = is_training
lowerCAmelCase_ : Dict = use_labels
lowerCAmelCase_ : Dict = vocab_size
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Union[str, Any] = intermediate_size
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : Any = eos_token_id
lowerCAmelCase_ : Dict = pad_token_id
lowerCAmelCase_ : List[str] = bos_token_id
lowerCAmelCase_ : Dict = embed_dim
lowerCAmelCase_ : Optional[Any] = word_embed_proj_dim
lowerCAmelCase_ : Optional[int] = False
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
lowerCAmelCase_ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
lowerCAmelCase_ : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1)
lowerCAmelCase_ : Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=A_ , **self.config_updates , )
lowerCAmelCase_ : Union[str, Any] = prepare_opt_inputs_dict(A_ , A_)
return config, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Tuple , A_ : Optional[Any]):
lowerCAmelCase_ : List[str] = TFOPTModel(config=A_)
lowerCAmelCase_ : int = inputs_dict['''input_ids''']
lowerCAmelCase_ : Tuple = input_ids[:1, :]
lowerCAmelCase_ : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase_ : Optional[int] = 1
# first forward pass
lowerCAmelCase_ : str = model(A_ , attention_mask=A_ , use_cache=A_)
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size)
lowerCAmelCase_ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
lowerCAmelCase_ : int = tf.concat([input_ids, next_tokens] , axis=-1)
lowerCAmelCase_ : int = tf.concat([attention_mask, next_attn_mask] , axis=-1)
lowerCAmelCase_ : Optional[int] = model(A_ , attention_mask=A_)[0]
lowerCAmelCase_ : str = model(A_ , attention_mask=A_ , past_key_values=A_)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
lowerCAmelCase_ : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1]))
lowerCAmelCase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A_ , A_ , rtol=1e-3)
@require_tf
class __snake_case ( UpperCamelCase_ ,UpperCamelCase_ ,unittest.TestCase ):
_a = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
_a = (TFOPTForCausalLM,) if is_tf_available() else ()
_a = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
_a = False
_a = False
_a = False
_a = 10
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : Dict = TFOPTModelTester(self)
lowerCAmelCase_ : Any = ConfigTester(self , config_class=A_)
def UpperCAmelCase__ ( self : Dict):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A_)
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(A_ : List[Any] , A_ : Tuple):
if hasattr(A_ , '''weight'''):
return embedding_layer.weight
else:
                # Build the word embedding weights if they do not exist yet,
                # then retry fetching the attribute once built.
model.build()
if hasattr(A_ , '''weight'''):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase_ : Optional[int] = model_class(config=A_)
lowerCAmelCase_ : Any = _get_word_embedding_weight(A_ , model.get_input_embeddings())
lowerCAmelCase_ : Union[str, Any] = _get_word_embedding_weight(A_ , model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(A_)
lowerCAmelCase_ : Union[str, Any] = _get_word_embedding_weight(A_ , model.get_input_embeddings())
lowerCAmelCase_ : List[Any] = _get_word_embedding_weight(A_ , model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ : Any = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , A_)
# check that weights remain the same after resizing
lowerCAmelCase_ : Optional[int] = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
lowerCAmelCase_ : str = False
self.assertTrue(A_)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , A_)
lowerCAmelCase_ : int = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
lowerCAmelCase_ : List[Any] = False
self.assertTrue(A_)
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ):
return tf.constant(__UpperCamelCase ,dtype=tf.intaa )
@require_tf
class __snake_case ( unittest.TestCase ):
_a = 99
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : Union[str, Any] = tf.ones((4, 1) , dtype=tf.intaa) * 2
lowerCAmelCase_ : Optional[Any] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1)
lowerCAmelCase_ : str = input_ids.shape[0]
lowerCAmelCase_ : Optional[Any] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Union[str, Any] = TFOPTModel.from_pretrained('''facebook/opt-350m''')
lowerCAmelCase_ : Union[str, Any] = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
lowerCAmelCase_ : Optional[Any] = tf.not_equal(A_ , model.config.pad_token_id)
with tf.GradientTape():
lowerCAmelCase_ : List[str] = model(input_ids=A_ , attention_mask=A_).last_hidden_state
lowerCAmelCase_ : int = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , A_)
lowerCAmelCase_ : Union[str, Any] = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4e-3))
lowerCAmelCase_ : Any = tf.function(A_ , jit_compile=A_)
lowerCAmelCase_ : Tuple = xla_generate(A_ , A_)[0]
self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4e-2))
@require_tf
@slow
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict):
super().setUp()
lowerCAmelCase_ : List[Any] = '''facebook/opt-350m'''
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : List[str] = TFOPTForCausalLM.from_pretrained(self.path_model)
lowerCAmelCase_ : Optional[Any] = GPTaTokenizer.from_pretrained(self.path_model)
lowerCAmelCase_ : Any = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ : str = tokenizer(A_ , return_tensors='''tf''' , padding=A_ , add_special_tokens=A_)
lowerCAmelCase_ : Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
lowerCAmelCase_ : Union[str, Any] = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
])
self.assertTrue(np.allclose(A_ , A_ , atol=1e-4))
lowerCAmelCase_ : str = tf.function(A_ , jit_compile=A_)
lowerCAmelCase_ : Any = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
self.assertTrue(np.allclose(A_ , A_ , atol=1e-4))
@require_tf
@slow
class __snake_case ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Union[str, Any]):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : Dict = '''facebook/opt-125m'''
lowerCAmelCase_ : Optional[Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : int = GPTaTokenizer.from_pretrained(A_)
lowerCAmelCase_ : str = TFOPTForCausalLM.from_pretrained(A_)
for prompt in self.prompts:
lowerCAmelCase_ : Any = tokenizer(A_ , return_tensors='''tf''').input_ids
lowerCAmelCase_ : Any = model.generate(A_ , max_length=1_0)
lowerCAmelCase_ : Dict = tokenizer.batch_decode(A_ , skip_special_tokens=A_)
predicted_outputs += generated_string
self.assertListEqual(A_ , A_)
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : str = '''facebook/opt-350m'''
lowerCAmelCase_ : str = GPTaTokenizer.from_pretrained(A_)
lowerCAmelCase_ : int = TFOPTForCausalLM.from_pretrained(A_)
lowerCAmelCase_ : Dict = '''left'''
# use different length sentences to test batching
lowerCAmelCase_ : Optional[int] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
lowerCAmelCase_ : List[Any] = tokenizer(A_ , return_tensors='''tf''' , padding=A_)
lowerCAmelCase_ : str = inputs['''input_ids''']
lowerCAmelCase_ : Dict = model.generate(input_ids=A_ , attention_mask=inputs['''attention_mask'''])
lowerCAmelCase_ : Any = tokenizer(sentences[0] , return_tensors='''tf''').input_ids
lowerCAmelCase_ : Tuple = model.generate(input_ids=A_)
lowerCAmelCase_ : Any = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa))
lowerCAmelCase_ : List[str] = tokenizer(sentences[1] , return_tensors='''tf''').input_ids
lowerCAmelCase_ : Any = model.generate(input_ids=A_ , max_length=model.config.max_length - num_paddings)
lowerCAmelCase_ : Optional[int] = tokenizer.batch_decode(A_ , skip_special_tokens=A_)
lowerCAmelCase_ : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_)
lowerCAmelCase_ : int = tokenizer.decode(output_padded[0] , skip_special_tokens=A_)
lowerCAmelCase_ : Tuple = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(A_ , A_)
self.assertListEqual(A_ , [non_padded_sentence, padded_sentence])
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ : Optional[int] = '''facebook/opt-350m'''
lowerCAmelCase_ : Optional[Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
lowerCAmelCase_ : int = []
lowerCAmelCase_ : int = GPTaTokenizer.from_pretrained(A_)
lowerCAmelCase_ : Optional[Any] = TFOPTForCausalLM.from_pretrained(A_)
for prompt in self.prompts:
lowerCAmelCase_ : Tuple = tokenizer(A_ , return_tensors='''tf''').input_ids
lowerCAmelCase_ : int = model.generate(A_ , max_length=1_0)
lowerCAmelCase_ : Tuple = tokenizer.batch_decode(A_ , skip_special_tokens=A_)
predicted_outputs += generated_string
self.assertListEqual(A_ , A_)
| 171
| 1
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_a : Tuple = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def UpperCamelCase__ ( _A: Optional[int] ):
'''simple docstring'''
__lowerCamelCase = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(_A , _A )
_a : int = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def UpperCamelCase__ ( _A: Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = list(s_dict.keys() )
for key in keys:
__lowerCamelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
__lowerCamelCase = new_key.replace(_A , _A )
print(f'''{key} -> {new_key}''' )
__lowerCamelCase = s_dict.pop(_A )
return s_dict
def UpperCamelCase__ ( _A: List[str] ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(_A , _A , bias=_A )
__lowerCamelCase = emb.weight.data
return lin_layer
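# Editorial note: the helper above ties the output projection to the decoder's
# token embedding by reusing its weight tensor, e.g.
#   proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
# so logits are computed against the same matrix used for input embeddings.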
def UpperCamelCase__ ( _A: str , _A: str ):
'''simple docstring'''
os.makedirs(_A , exist_ok=_A )
__lowerCamelCase = os.path.basename(_A )
__lowerCamelCase = url.split("""/""" )[-2]
__lowerCamelCase = os.path.join(_A , _A )
if os.path.exists(_A ) and not os.path.isfile(_A ):
raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
if os.path.isfile(_A ):
__lowerCamelCase = open(_A , """rb""" ).read()
if hashlib.shaaaa(_A ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(_A ) as source, open(_A , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=_A , unit_divisor=1024 ) as loop:
while True:
__lowerCamelCase = source.read(8192 )
if not buffer:
break
output.write(_A )
loop.update(len(_A ) )
__lowerCamelCase = open(_A , """rb""" ).read()
if hashlib.shaaaa(_A ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def UpperCamelCase__ ( _A: str , _A: Any ):
'''simple docstring'''
if ".pt" not in checkpoint_path:
__lowerCamelCase = _download(_MODELS[checkpoint_path] )
else:
__lowerCamelCase = torch.load(_A , map_location="""cpu""" )
__lowerCamelCase = original_checkpoint["""dims"""]
__lowerCamelCase = original_checkpoint["""model_state_dict"""]
__lowerCamelCase = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(_A )
rename_keys(_A )
__lowerCamelCase = True
__lowerCamelCase = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
__lowerCamelCase = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=_A , decoder_ffn_dim=_A , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
__lowerCamelCase = WhisperForConditionalGeneration(_A )
__lowerCamelCase , __lowerCamelCase = model.model.load_state_dict(_A , strict=_A )
if len(_A ) > 0 and not set(_A ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
__lowerCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__lowerCamelCase = proj_out_weights
model.save_pretrained(_A )
if __name__ == "__main__":
_a : str = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoint')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_a : List[Any] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
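# Editorial usage sketch (script filename is a placeholder):
#   python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en
# A bare size name ("tiny.en", "large-v2", ...) is fetched from _MODELS with a
# SHA256 check; any path containing ".pt" is loaded directly from disk instead.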
| 704
|
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
A = ['''image_processor''', '''feature_extractor''']
A = '''TvltImageProcessor'''
A = '''TvltFeatureExtractor'''
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
super().__init__(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase )
__lowerCamelCase = image_processor
__lowerCamelCase = feature_extractor
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , *UpperCAmelCase , **UpperCAmelCase , ):
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
__lowerCamelCase = None
if images is not None:
__lowerCamelCase = self.image_processor(UpperCAmelCase , mask_pixel=UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
if images_mixed is not None:
__lowerCamelCase = self.image_processor(UpperCAmelCase , is_mixed=UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
if audio is not None:
__lowerCamelCase = self.feature_extractor(
UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , mask_audio=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase = {}
if audio is not None:
output_dict.update(UpperCAmelCase )
if images is not None:
output_dict.update(UpperCAmelCase )
if images_mixed_dict is not None:
output_dict.update(UpperCAmelCase )
return output_dict
@property
def lowerCamelCase_ ( self ):
__lowerCamelCase = self.image_processor.model_input_names
__lowerCamelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
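# --- Illustrative addendum (not part of the original processor) ---
# Dependency-free sketch of the merging pattern in __call__ above: each
# modality is processed only when provided, and the per-modality feature
# dicts are folded into a single output mapping. Keys here are made up.
def merge_modality_outputs(image_dict=None, audio_dict=None) -> dict:
    output_dict = {}
    if audio_dict is not None:
        output_dict.update(audio_dict)
    if image_dict is not None:
        output_dict.update(image_dict)
    return output_dict
assert merge_modality_outputs({"pixel_values": 1}, {"audio_values": 2}) == {"pixel_values": 1, "audio_values": 2}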
| 571
| 0
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def A_ ( __lowercase ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
def A_ ( __lowercase ):
# word like '180' or '身高' or '神'
for char in word:
UpperCamelCase_ : Union[str, Any] =ord(__lowercase )
if not _is_chinese_char(__lowercase ):
return 0
return 1
def A_ ( __lowercase ):
UpperCamelCase_ : List[str] =set()
for token in tokens:
UpperCamelCase_ : Optional[int] =len(__lowercase ) > 1 and is_chinese(__lowercase )
if chinese_word:
word_set.add(__lowercase )
UpperCamelCase_ : Tuple =list(__lowercase )
return word_list
def A_ ( __lowercase , __lowercase ):
if not chinese_word_set:
return bert_tokens
UpperCamelCase_ : List[str] =max([len(__lowercase ) for w in chinese_word_set] )
UpperCamelCase_ : Optional[Any] =bert_tokens
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] =0, len(__lowercase )
while start < end:
UpperCamelCase_ : str =True
if is_chinese(bert_word[start] ):
UpperCamelCase_ : Optional[int] =min(end - start , __lowercase )
for i in range(__lowercase , 1 , -1 ):
UpperCamelCase_ : Tuple =''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
UpperCamelCase_ : Tuple ='##' + bert_word[j]
UpperCamelCase_ : int =start + i
UpperCamelCase_ : Dict =False
break
if single_word:
start += 1
return bert_word
def A_ ( __lowercase , __lowercase , __lowercase ):
UpperCamelCase_ : Tuple =[]
for i in range(0 , len(__lowercase ) , 1_00 ):
UpperCamelCase_ : Union[str, Any] =ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
UpperCamelCase_ : int =[get_chinese_word(__lowercase ) for r in res]
ltp_res.extend(__lowercase )
assert len(__lowercase ) == len(__lowercase )
UpperCamelCase_ : Dict =[]
for i in range(0 , len(__lowercase ) , 1_00 ):
UpperCamelCase_ : int =bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__lowercase , truncation=__lowercase , max_length=5_12 )
bert_res.extend(res['input_ids'] )
assert len(__lowercase ) == len(__lowercase )
UpperCamelCase_ : Dict =[]
for input_ids, chinese_word in zip(__lowercase , __lowercase ):
UpperCamelCase_ : List[str] =[]
for id in input_ids:
UpperCamelCase_ : Union[str, Any] =bert_tokenizer._convert_id_to_token(__lowercase )
input_tokens.append(__lowercase )
UpperCamelCase_ : Optional[int] =add_sub_symbol(__lowercase , __lowercase )
UpperCamelCase_ : Dict =[]
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__lowercase ):
if token[:2] == "##":
UpperCamelCase_ : Optional[int] =token[2:]
# save chinese tokens' pos
if len(__lowercase ) == 1 and _is_chinese_char(ord(__lowercase ) ):
ref_id.append(__lowercase )
ref_ids.append(__lowercase )
assert len(__lowercase ) == len(__lowercase )
return ref_ids
def A_ ( __lowercase ):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
UpperCamelCase_ : Tuple =f.readlines()
UpperCamelCase_ : Optional[int] =[line.strip() for line in data if len(__lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
UpperCamelCase_ : Optional[Any] =LTP(args.ltp ) # faster in GPU device
UpperCamelCase_ : Dict =BertTokenizer.from_pretrained(args.bert )
UpperCamelCase_ : int =prepare_ref(__lowercase , __lowercase , __lowercase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
UpperCamelCase_ : Tuple =[json.dumps(__lowercase ) + '\n' for ref in ref_ids]
f.writelines(__lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
        help='file to process, same as training data in LM',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__SCREAMING_SNAKE_CASE = parser.parse_args()
main(args)
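# --- Illustrative addendum (not part of the original script) ---
# Self-contained sketch of the greedy longest-match marking performed by
# add_sub_symbol above: any character that continues a known Chinese word is
# given a "##" prefix so whole-word masking treats the word as one unit.
# (Simplified: the original also checks is_chinese() before matching.)
def mark_subwords(bert_tokens: list, chinese_words: set) -> list:
    max_len = max((len(w) for w in chinese_words), default=1)
    tokens = list(bert_tokens)
    start, end = 0, len(tokens)
    while start < end:
        matched = False
        for i in range(min(end - start, max_len), 1, -1):
            if "".join(tokens[start : start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    tokens[j] = "##" + tokens[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return tokens
# "身高" is a known word, so its second character is marked as a subword:
assert mark_subwords(["身", "高", "很", "高"], {"身高"}) == ["身", "##高", "很", "高"]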
| 357
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
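# --- Illustrative addendum (not part of the original module) ---
# Toy version of the lazy-import idea behind _LazyModule above: attribute
# access triggers the real import, so an optional heavy backend (torch in
# the real case; "math" stands in here) is only imported when actually used.
import importlib
class _ToyLazyModule:
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None
    def __getattr__(self, name):
        # called only for attributes not found normally, e.g. "sqrt"
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)
lazy_math = _ToyLazyModule("math")
assert lazy_math.sqrt(9) == 3.0  # "math" is imported here, on first access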
| 357
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ :List[Any] = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Any = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Union[str, Any] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A_ :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 154
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    titles = soup.find_all('td', attrs='titleColumn')
    ratings = soup.find_all('td', class_='ratingColumn imdbRating')
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, 'w', newline='') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['Movie title', 'IMDb rating'])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 154
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Any = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 589
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : List[str] = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ : Dict = '''BlipImageProcessor'''
UpperCamelCase_ : Optional[int] = '''AutoTokenizer'''
def __init__( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
# add QFormer tokenizer
UpperCamelCase = qformer_tokenizer
def __call__( self : Tuple , UpperCAmelCase_ : ImageInput = None , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : str , )-> BatchFeature:
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
UpperCamelCase = BatchFeature()
if text is not None:
UpperCamelCase = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
encoding.update(UpperCAmelCase_ )
UpperCamelCase = self.qformer_tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase = qformer_text_encoding.pop("input_ids" )
UpperCamelCase = qformer_text_encoding.pop("attention_mask" )
if images is not None:
UpperCamelCase = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def _SCREAMING_SNAKE_CASE ( self : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str] )-> Dict:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] )-> List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _SCREAMING_SNAKE_CASE ( self : Any )-> Tuple:
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple )-> Union[str, Any]:
"""simple docstring"""
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
UpperCamelCase = os.path.join(UpperCAmelCase_ , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , UpperCAmelCase_ : int , **UpperCAmelCase_ : int )-> int:
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ , subfolder="qformer_tokenizer" )
UpperCamelCase = cls._get_arguments_from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
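# --- Illustrative addendum (not part of the original processor) ---
# Sketch of the key-prefixing step in __call__ above: the QFormer encoding's
# tensors are popped and re-stored under distinct keys so they do not clash
# with the main tokenizer's "input_ids"/"attention_mask". The exact
# "qformer_*" key names are an assumption inferred from the pops above.
def prefix_qformer_keys(encoding: dict, qformer_encoding: dict) -> dict:
    merged = dict(encoding)
    merged["qformer_input_ids"] = qformer_encoding.pop("input_ids")
    merged["qformer_attention_mask"] = qformer_encoding.pop("attention_mask")
    return merged
assert prefix_qformer_keys({"input_ids": [1]}, {"input_ids": [2], "attention_mask": [1]}) == {"input_ids": [1], "qformer_input_ids": [2], "qformer_attention_mask": [1]}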
| 554
| 0
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
lowercase : int = logging.getLogger(__name__)
def snake_case__ ( lowerCamelCase_ ):
A : Dict = git.Repo(search_parent_directories=lowerCamelCase_ )
A : Optional[Any] = {
'''repo_id''': str(lowerCamelCase_ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(lowerCamelCase_ , '''git_log.json''' ) , '''w''' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=4 )
def snake_case__ ( lowerCamelCase_ ):
if params.n_gpu <= 0:
A : int = 0
A : Dict = -1
A : Dict = True
A : Any = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
A : int = int(os.environ['''WORLD_SIZE'''] )
A : Optional[int] = int(os.environ['''N_GPU_NODE'''] )
A : Any = int(os.environ['''RANK'''] )
# number of nodes / node ID
A : List[Any] = params.world_size // params.n_gpu_per_node
A : int = params.global_rank // params.n_gpu_per_node
A : Optional[int] = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
A : str = 1
A : Optional[Any] = 0
A : List[str] = 0
A : Optional[Any] = 0
A : Union[str, Any] = 1
A : Tuple = 1
A : Dict = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
A : Dict = params.node_id == 0 and params.local_rank == 0
A : Dict = params.n_nodes > 1
# summary
A : Tuple = F'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def snake_case__ ( lowerCamelCase_ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
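# --- Illustrative addendum (not part of the original utilities) ---
# The node bookkeeping above reduces to integer division: with world_size
# processes and n_gpu_per_node processes per machine, the node count and a
# given process's node id follow directly.
def node_info(world_size: int, n_gpu_per_node: int, global_rank: int):
    n_nodes = world_size // n_gpu_per_node
    node_id = global_rank // n_gpu_per_node
    return n_nodes, node_id
# 16 processes on machines with 8 GPUs each -> 2 nodes; rank 11 sits on node 1
assert node_info(16, 8, 11) == (2, 1)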
| 423
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=4_00 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 2_55 , __UpperCAmelCase=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : List[Any] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
A : List[Any] = parent
A : Dict = batch_size
A : Optional[Any] = num_channels
A : Union[str, Any] = min_resolution
A : int = max_resolution
A : Optional[int] = do_resize
A : Dict = size
A : List[Any] = do_normalize
A : int = image_mean
A : List[str] = image_std
A : Optional[int] = do_rescale
A : Any = rescale_factor
A : int = do_pad
def snake_case ( self ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[Any]:
if not batched:
A : List[Any] = image_inputs[0]
if isinstance(__UpperCAmelCase , Image.Image ):
A , A : Tuple = image.size
else:
A , A : Dict = image.shape[1], image.shape[2]
if w < h:
A : str = int(self.size['''shortest_edge'''] * h / w )
A : Dict = self.size['''shortest_edge''']
elif w > h:
A : Union[str, Any] = self.size['''shortest_edge''']
A : Any = int(self.size['''shortest_edge'''] * w / h )
else:
A : Any = self.size['''shortest_edge''']
A : Union[str, Any] = self.size['''shortest_edge''']
else:
A : Optional[Any] = []
for image in image_inputs:
A , A : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : Optional[int] = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0]
A : Union[str, Any] = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Dict = ConditionalDetrImageProcessor if is_vision_available() else None
def snake_case ( self ) -> Optional[int]:
A : str = ConditionalDetrImageProcessingTester(self )
@property
def snake_case ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ) -> str:
A : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
def snake_case ( self ) -> List[Any]:
A : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
A : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def snake_case ( self ) -> Dict:
pass
def snake_case ( self ) -> Tuple:
# Initialize image_processing
A : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
A : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
A : Any = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ) -> Optional[Any]:
# Initialize image_processing
A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
A : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Any = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
A , A : Union[str, Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ) -> List[str]:
# Initialize image_processing
A : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
A : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Dict = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : int = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
A , A : Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self ) -> Optional[Any]:
# prepare image and target
A : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
A : int = json.loads(f.read() )
A : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
A : Tuple = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
A : Union[str, Any] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
A : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
A : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
A : Optional[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
A : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
A : Tuple = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
A : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
A : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
A : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify orig_size
A : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
A : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
@slow
def snake_case ( self ) -> Tuple:
# prepare image, target and masks_path
A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
A : Optional[Any] = json.loads(f.read() )
A : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
A : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
A : Dict = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
A : List[str] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
A : Any = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
A : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
A : List[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
A : int = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
A : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
A : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify masks
A : Dict = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __UpperCAmelCase )
# verify orig_size
A : Tuple = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
A : int = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
| 423
| 1
|
"""simple docstring"""
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE__ ( torch.nn.Module ):
def __init__( self : int , lowerCAmelCase : List[Any]="sayef/fsner-bert-base-uncased" ):
        super().__init__()
lowerCAmelCase = AutoModel.from_pretrained(lowerCAmelCase , return_dict=lowerCAmelCase )
lowerCAmelCase = torch.nn.CosineSimilarity(3 , 1e-08 )
lowerCAmelCase = torch.nn.Softmax(dim=1 )
def __lowercase ( self : Union[str, Any] , **lowerCAmelCase : List[Any] ):
return self.bert(**lowerCAmelCase ).last_hidden_state
def __lowercase ( self : Any , lowerCAmelCase : str ):
return token_embeddings.sum(2 , keepdim=lowerCAmelCase )
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple=1 ):
return self.softmax(T * self.cos(lowerCAmelCase , lowerCAmelCase ) )
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : str ):
lowerCAmelCase = W_supports["""sizes"""].tolist()
lowerCAmelCase = W_supports["""start_token_id"""].item()
lowerCAmelCase = W_supports["""end_token_id"""].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowerCAmelCase = self.BERT(**lowerCAmelCase )
lowerCAmelCase = self.BERT(**lowerCAmelCase )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = W_supports["""input_ids"""] == start_token_id
lowerCAmelCase = W_supports["""input_ids"""] == end_token_id
for i, size in enumerate(lowerCAmelCase ):
if i == 0:
lowerCAmelCase = 0
else:
lowerCAmelCase = support_sizes[i - 1]
lowerCAmelCase = S[s : s + size][start_token_masks[s : s + size]]
lowerCAmelCase = S[s : s + size][end_token_masks[s : s + size]]
lowerCAmelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowerCAmelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowerCAmelCase = torch.vstack((p_starts, p_start) )
lowerCAmelCase = torch.vstack((p_ends, p_end) )
else:
lowerCAmelCase = p_start
lowerCAmelCase = p_end
return p_starts, p_ends
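# --- Illustrative addendum (not part of the original model) ---
# Toy-shaped restatement of the scoring rule in the forward pass above: each
# query token's similarity to every support start-token embedding is summed,
# then a softmax over query positions yields start probabilities. The shapes
# below are illustrative only.
q_tok = torch.randn(5, 8)      # 5 query tokens, hidden size 8
s_start = torch.randn(3, 8)    # 3 support start-token embeddings
p_start = torch.matmul(q_tok, s_start.T).sum(1).softmax(0)
assert p_start.shape == (5,)
assert torch.isclose(p_start.sum(), torch.tensor(1.0))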
| 169
|
"""simple docstring"""
def solution(pence: int = 200) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    assert solution(200) == 73682
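# --- Illustrative addendum ---
# Micro-check of the DP above on a reduced coin set: iterating coins in the
# outer loop counts combinations (multisets), not orderings, so with coins
# [1, 2] there are exactly two ways to make 3 pence: {1,1,1} and {1,2}.
def count_ways(coins, target):
    ways = [0] * (target + 1)
    ways[0] = 1
    for coin in coins:
        for i in range(coin, target + 1):
            ways[i] += ways[i - coin]
    return ways[target]
assert count_ways([1, 2], 3) == 2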
| 169
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def SCREAMING_SNAKE_CASE ( ) -> Any:
lowerCamelCase__ : int = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=lowercase_ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=lowercase_ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=lowercase_ )
return parser.parse_args()
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
lowerCamelCase__ : List[str] = parse_args()
# Import training_script as a module.
lowerCamelCase__ : List[str] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCamelCase__ : str = script_fpath.stem
lowerCamelCase__ : str = importlib.import_module(lowercase_ )
# Patch sys.argv
lowerCamelCase__ : Optional[int] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
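# --- Illustrative addendum (not part of the original launcher) ---
# The argv-patching trick above makes the imported training module believe it
# was invoked directly with its own flags plus --tpu_num_cores. A
# side-effect-free restatement of that rewrite:
def build_patched_argv(script: str, script_args: list, num_cores: int) -> list:
    return [script] + script_args + ["--tpu_num_cores", str(num_cores)]
assert build_patched_argv("train.py", ["--lr", "3e-5"], 8) == ["train.py", "--lr", "3e-5", "--tpu_num_cores", "8"]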
| 720
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"""{solution() = }""")
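# --- Illustrative addendum ---
# Sanity check of the tile-count identity the loop above relies on: an outer
# n x n square with an inner m x m hole uses n*n - m*m tiles, so a 3x3 frame
# around a 1x1 hole uses 8 tiles (the smallest lamina).
def lamina_tiles(outer_width: int, hole_width: int) -> int:
    return outer_width * outer_width - hole_width * hole_width
assert lamina_tiles(3, 1) == 8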
| 188
| 0
|
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
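# --- Illustrative addendum ---
# Usage examples for the function above: it returns the 1-based index of the
# most significant set bit (equivalently, the bit length), with 0 -> 0.
assert get_highest_set_bit_position(0) == 0
assert get_highest_set_bit_position(8) == 4  # 0b1000
assert get_highest_set_bit_position(5) == 3  # 0b101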
| 447
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = 'ylacombe/bark-small'
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = 'en_speaker_1'
UpperCAmelCase = 'This is a test string'
UpperCAmelCase = 'speaker_embeddings_path.json'
UpperCAmelCase = 'speaker_embeddings'
def snake_case_ ( self , **a_ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **a_ )
def snake_case_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BarkProcessor(tokenizer=a_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def snake_case_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCAmelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase = 3_5
UpperCAmelCase = 2
UpperCAmelCase = 8
UpperCAmelCase = {
'semantic_prompt': np.ones(a_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase = processor(text=self.input_string , voice_preset=a_ )
UpperCAmelCase = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(a_ , **a_ )
UpperCAmelCase = processor(text=self.input_string , voice_preset=a_ )
UpperCAmelCase = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BarkProcessor(tokenizer=a_ )
UpperCAmelCase = processor(text=self.input_string )
UpperCAmelCase = tokenizer(
self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=a_ , return_attention_mask=a_ , return_token_type_ids=a_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 447
| 1
|
'''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : int ) -> list:
A_ = word.split()
def justify(_UpperCamelCase : list, _UpperCamelCase : int, _UpperCamelCase : int ) -> str:
A_ = max_width - width
A_ = len(_lowercase )
if len(_lowercase ) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count spaces after it to fill the line
return line[0] + " " * overall_spaces_count
else:
A_ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
A_ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
A_ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_lowercase ):
num_spaces_between_words_list[i] += 1
A_ = []
for i in range(_lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowercase )
A_ = []
A_ = []
A_ = 0
for word in words:
if width + len(_lowercase ) + len(_lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowercase )
width += len(_lowercase )
else:
# justify the line and add it to result
answer.append(justify(_lowercase, _lowercase, _lowercase ) )
# reset new line and new width
A_ = [word], len(_lowercase )
A_ = max_width - width - len(_lowercase )
answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
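# --- Illustrative addendum (not part of the original solution) ---
# Worked restatement of the inner justify() rule above: surplus spaces are
# split evenly across the gaps, with the remainder handed out round-robin
# from the left. Justifying ["This", "is", "an"] (8 characters) to width 12
# spreads 4 spaces over 2 gaps.
def justify_line(line: list, width: int, max_width: int) -> str:
    gaps = len(line) - 1
    if gaps == 0:
        return line[0] + " " * (max_width - width)
    base, extra = divmod(max_width - width, gaps)
    pieces = []
    for i, word in enumerate(line[:-1]):
        pieces.append(word)
        pieces.append(" " * (base + (1 if i < extra else 0)))
    pieces.append(line[-1])
    return "".join(pieces)
assert justify_line(["This", "is", "an"], 8, 12) == "This  is  an"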
| 709
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.02 , ) -> List[Any]:
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = rotary_dim
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = None
A_ = vocab_size - 1
A_ = vocab_size - 1
A_ = vocab_size - 1
def __A ( self ) -> List[str]:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __A ( self ) -> Any:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = 20
A_ = model_class_name(_SCREAMING_SNAKE_CASE )
A_ = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
A_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
A_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
A_ = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = 20
A_ = model_class_name(_SCREAMING_SNAKE_CASE )
A_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
A_ = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
A_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
A_ = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowercase : int = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __A ( self ) -> List[Any]:
A_ = FlaxGPTJModelTester(self )
def __A ( self ) -> Any:
for model_class_name in self.all_model_classes:
A_ ,A_ ,A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
A_ ,A_ ,A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __A ( self ) -> Any:
A_ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
A_ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
A_ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
A_ = False
A_ = model.config.eos_token_id
A_ = jax.jit(model.generate )
A_ = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
A_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
A_ = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ ,A_ = pt_inputs['''input_ids'''].shape
A_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
A_ = 0
A_ = 1
A_ = 0
A_ = 1
A_ = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
                A_ = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.float32 )
A_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
A_ = fx_state
with torch.no_grad():
A_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
A_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
A_ = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
                A_ = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.float32 )
A_ = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
A_ ,A_ = pt_inputs['''input_ids'''].shape
A_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
A_ = 0
A_ = 1
A_ = 0
A_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
A_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
A_ = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __A ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
A_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 174
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
_lowercase = TypeVar('''T''')
class __a ( Generic[T] ):
'''simple docstring'''
def __init__( self , _lowerCamelCase ) -> None:
'''simple docstring'''
__lowercase = data
__lowercase = self
__lowercase = 0
class __a ( Generic[T] ):
'''simple docstring'''
def __init__( self ) -> None:
'''simple docstring'''
# map from node name to the node object
__lowercase = {}
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> None:
'''simple docstring'''
# create a new set with x as its member
__lowercase = DisjointSetTreeNode(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> DisjointSetTreeNode[T]:
'''simple docstring'''
# find the set x belongs to (with path-compression)
__lowercase = self.map[data]
if elem_ref != elem_ref.parent:
__lowercase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
    def SCREAMING_SNAKE_CASE ( self , nodea , nodeb ) -> None:
        '''simple docstring'''
        # helper function for union operation: attach the lower-rank root
        # under the higher-rank root (union by rank)
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase ) -> None:
'''simple docstring'''
# merge 2 disjoint sets
self.link(self.find_set(_lowerCamelCase ) , self.find_set(_lowerCamelCase ) )
class __a ( Generic[T] ):
'''simple docstring'''
def __init__( self ) -> None:
'''simple docstring'''
# connections: map from the node to the neighbouring nodes (with weights)
__lowercase = {}
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> None:
'''simple docstring'''
# add a node ONLY if its not present in the graph
if node not in self.connections:
__lowercase = {}
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> None:
'''simple docstring'''
# add an edge with the given weight
self.add_node(_lowerCamelCase )
self.add_node(_lowerCamelCase )
__lowercase = weight
__lowercase = weight
def SCREAMING_SNAKE_CASE ( self ) -> GraphUndirectedWeighted[T]:
'''simple docstring'''
__lowercase = []
__lowercase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
__lowercase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_lowerCamelCase )
# MST generation
__lowercase = 0
__lowercase = 0
__lowercase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
__lowercase , __lowercase , __lowercase = edges[index]
index += 1
__lowercase = disjoint_set.find_set(_lowerCamelCase )
__lowercase = disjoint_set.find_set(_lowerCamelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
disjoint_set.union(_lowerCamelCase , _lowerCamelCase )
return graph
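# --- Illustrative addendum (not part of the original classes) ---
# Stripped-down sketch of the path compression done in find_set above: every
# node visited on the way to the root is re-pointed directly at the root, so
# later lookups are amortized near-constant time.
def find(parent: dict, x):
    if parent[x] != x:
        parent[x] = find(parent, parent[x])  # compress: point x at the root
    return parent[x]
parents = {1: 1, 2: 1, 3: 2, 4: 3}
assert find(parents, 4) == 1
assert parents[4] == 1  # 4 now points straight at the root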
| 118
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
    '''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
    '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
    '''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
    '''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
    '''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
    '''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
    '''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
    '''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
    '''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
    '''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
    '''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """codegen"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    '''simple docstring'''

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ) -> None:
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
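
# A minimal usage sketch of this ONNX config (illustrative only; it assumes the
# classes above are exported as `CodeGenConfig` / `CodeGenOnnxConfig` and that a
# compatible tokenizer can be downloaded):
#
#     from transformers import AutoTokenizer
#
#     config = CodeGenConfig()
#     onnx_config = CodeGenOnnxConfig(config, task="default", use_past=False)
#     tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     dummy_inputs = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     # dummy_inputs is an OrderedDict with "input_ids" first, matching forward()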
| 118
| 1
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = AlbertModel.from_pretrained("""albert-base-v2""")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 705
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, '''src''', '''diffusers''')
class CheckDummiesTester(unittest.TestCase):
    '''simple docstring'''

    def test_find_backend(self):
        '''simple docstring'''
        simple_backend = find_backend(""" if not is_torch_available():""")
        self.assertEqual(simple_backend, """torch""")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(""" if not (is_torch_available() and is_transformers_available()):""")
        self.assertEqual(double_backend, """torch_and_transformers""")
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""")
        self.assertEqual(triple_backend, """torch_and_transformers_and_onnx""")
    def test_read_init(self):
        '''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("""torch""", objects)
        self.assertIn("""torch_and_transformers""", objects)
        self.assertIn("""flax_and_transformers""", objects)
        self.assertIn("""torch_and_transformers_and_onnx""", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("""UNet2DModel""", objects["""torch"""])
        self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""])
        self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""])
        self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""])
        self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""])
        self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""])
    def test_create_dummy_object(self):
        '''simple docstring'''
        dummy_constant = create_dummy_object("""CONSTANT""", """'torch'""")
        self.assertEqual(dummy_constant, """\nCONSTANT = None\n""")
        dummy_function = create_dummy_object("""function""", """'torch'""")
        self.assertEqual(
            dummy_function, """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""")
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""", """'torch'""")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        '''simple docstring'''
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, [\"torch\"])


class FakeClass(metaclass=DummyObject):
    _backends = [\"torch\"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, [\"torch\"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, [\"torch\"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, [\"torch\"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]})
        self.assertEqual(dummy_files["""torch"""], expected_dummy_pytorch_file)
| 28
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    '''simple docstring'''
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')

    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')

    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')

    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')

    return key
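
# A couple of worked examples for replace_key (illustrative only; the checkpoint
# keys below are representative, not taken from a real checkpoint):
#   replace_key("prime_state_ln.weight") -> "encoder.final_layer_norm.weight"
#   replace_key("y_emb.weight")          -> "metadata_embedding.weight"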
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    '''simple docstring'''
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key} -> {key}: shapes {val.shape} and {value.shape} do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    '''simple docstring'''
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", 'wb').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")['model']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.', '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = 'vqvae' if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", 'w') as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
lowerCamelCase : List[str] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
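
# Example invocation (illustrative; assumes this script is saved as convert_jukebox.py):
#     python convert_jukebox.py --model_name jukebox-1b-lyrics \
#         --pytorch_dump_folder_path jukebox-1b-lyrics-converted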
| 70
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2LMHeadModel(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 449
| 0
|
def calc_profit(profit, weight, max_weight):
    if len(profit) != len(weight):
        raise ValueError('''The length of profit and weight must be same.''')
    if max_weight <= 0:
        raise ValueError('''max_weight must be greater than zero.''')
    if any(p < 0 for p in profit):
        raise ValueError('''Profit can not be negative.''')
    if any(w < 0 for w in weight):
        raise ValueError('''Weight can not be negative.''')

    # List created to store the profit gained for 1 kg of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not exceed the max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # pick the greatest not-yet-used element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # remaining in the knapsack.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Add the profit gained for taking the whole item:
            # (weight[index] / weight[index]) * profit[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the remaining limit,
            # take only the required fraction of the item:
            # (weight remaining / weight[index]) * profit[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
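
# A quick worked example (illustrative): with profit = [1, 2, 3],
# weight = [3, 4, 5] and max_weight = 15, every item fits (total weight 12),
# so calc_profit returns 1 + 2 + 3 = 6.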
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 720
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''')

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''')
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8_760, -7.0_042, -8.6_602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 249
| 0
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("""extra_id""" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""")
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""")

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, **kwargs, )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        '''simple docstring'''
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""", FutureWarning, )

        return max_model_length
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        '''simple docstring'''
        return list(
            set(filter(lambda token: bool(re.search(R"""<extra_id_\d+>""", token)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        '''simple docstring'''
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        '''simple docstring'''
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                """ eos tokens being added.""")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : "TextInput" , **_UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_lowerCAmelCase : Any = SPIECE_UNDERLINE + text.replace(_UpperCAmelCase , """ """ )
return super().tokenize(_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : Dict , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_lowerCAmelCase : Any = text.startswith(_UpperCAmelCase )
if is_first:
_lowerCAmelCase : List[Any] = text[1:]
_lowerCAmelCase : Tuple = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(_UpperCAmelCase ):
_lowerCAmelCase : int = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token.startswith("""<extra_id_"""):
            match = re.match(R"""<extra_id_(\d+)>""", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
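
# A minimal usage sketch (illustrative only; assumes the class above is exported
# as `T5Tokenizer` and that the pretrained vocab files listed earlier are reachable):
#
#     tokenizer = T5Tokenizer.from_pretrained("t5-small")
#     ids = tokenizer("Translate English to German: hello", return_tensors="pt").input_ids
#     print(tokenizer.convert_ids_to_tokens(ids[0]))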
| 429
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 1
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 702
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'altclip_text_model'

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : List[Any] = 'altclip_vision_model'
def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="quick_gelu" , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1.0 , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = hidden_size
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[int] = projection_dim
snake_case_ : int = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : int = num_channels
snake_case_ : Optional[int] = patch_size
snake_case_ : Dict = image_size
snake_case_ : Tuple = initializer_range
snake_case_ : Optional[Any] = initializer_factor
snake_case_ : Any = attention_dropout
snake_case_ : Tuple = layer_norm_eps
snake_case_ : int = hidden_act
@classmethod
def _lowerCAmelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ : str = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
snake_case_ : List[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but are different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
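

# Usage sketch (hedged): the composite config can be assembled from the two
# sub-configs above; `projection_dim` and `logit_scale_init_value` keep their
# defaults unless overridden.
#
#     text_cfg = AltCLIPTextConfig(hidden_size=1024)
#     vision_cfg = AltCLIPVisionConfig(image_size=224)
#     config = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)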
| 114
| 0
|
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph over `vertices_number` nodes as an adjacency dict."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly drawn number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete graph with `vertices_number` vertices.
    >>> complete_graph(3)
    {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
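
    # Hedged demo: a 4-node undirected graph where each candidate edge is kept
    # with probability 0.5 (the result differs from run to run).
    print(random_graph(4, 0.5))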
| 52
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4_096,
"""google/bigbird-roberta-large""": 4_096,
"""google/bigbird-base-trivia-itc""": 4_096,
}
SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
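

# Usage sketch (hedged, needs a downloaded checkpoint): the helpers above produce
# BERT-style inputs, `[CLS] A [SEP]` for one sequence and `[CLS] A [SEP] B [SEP]`
# for a pair, e.g.:
#
#     tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#     ids = tok.build_inputs_with_special_tokens([10, 11], [12, 13])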
| 387
| 0
|
'''simple docstring'''
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 703
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
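

# Usage sketch (hedged; shapes illustrative): `hidden` is (seq_len, batch, d_proj)
# and `target` is (seq_len, batch) integer ids. The layer returns log-probabilities
# and records the negative log-likelihood through `add_loss()` when a target is given.
#
#     layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
#     logprob = layer(hidden, target)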
| 187
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
a = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments for seq2seq fine-tuning; extends `TrainingArguments`."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
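

# Usage sketch (hedged): since these fields extend `TrainingArguments`, they are
# normally filled from the command line with `HfArgumentParser`, e.g.:
#
#     from transformers import HfArgumentParser
#     parser = HfArgumentParser(Seq2SeqTrainingArguments)
#     (training_args,) = parser.parse_args_into_dataclasses(
#         ["--output_dir", "out", "--predict_with_generate"]
#     )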
| 7
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 7
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3        unk: 2 + 1 = 3 ^
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
A = {"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3"
        )
| 546
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, start, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's algorithm: build a minimum spanning tree from an adjacency list."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
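    # Hedged I/O note: each edge line is read as "u v w" (two endpoints and a
    # weight); the output is the list of MST edges as (parent_in_tree, vertex).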
| 546
| 1
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory returning the convert command from parsed CLI args."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    filtered_list = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(filtered_list) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
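

# Usage sketch (hedged): the command registered above is reached through the
# datasets CLI, e.g.:
#
#     datasets-cli convert --tfds_path ./my_tfds_dataset.py --datasets_directory ./hf_datasets/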
| 406
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_snake_case : Dict = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db"
        )
| 326
| 0
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
__lowercase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Same tests as above, but relying on spaCy and ftfy for pre-tokenization."""

    pass
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634
| 0
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : str =['''pixel_values''']
def __init__( self :Optional[int], snake_case :bool = True, snake_case :Union[int, float] = 1 / 255, snake_case :bool = True, snake_case :int = 8, **snake_case :str, ):
"""simple docstring"""
super().__init__(**snake_case)
_lowercase =do_rescale
_lowercase =rescale_factor
_lowercase =do_pad
_lowercase =pad_size
def UpperCamelCase__ ( self :List[Any], snake_case :np.ndarray, snake_case :float, snake_case :Optional[Union[str, ChannelDimension]] = None, **snake_case :Optional[Any]):
"""simple docstring"""
return rescale(snake_case, scale=snake_case, data_format=snake_case, **snake_case)
def UpperCamelCase__ ( self :str, snake_case :np.ndarray, snake_case :int, snake_case :Optional[Union[str, ChannelDimension]] = None):
"""simple docstring"""
_lowercase , _lowercase =get_image_size(snake_case)
_lowercase =(old_height // size + 1) * size - old_height
_lowercase =(old_width // size + 1) * size - old_width
return pad(snake_case, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=snake_case)
def UpperCamelCase__ ( self :str, snake_case :ImageInput, snake_case :Optional[bool] = None, snake_case :Optional[float] = None, snake_case :Optional[bool] = None, snake_case :Optional[int] = None, snake_case :Optional[Union[str, TensorType]] = None, snake_case :Union[str, ChannelDimension] = ChannelDimension.FIRST, **snake_case :Dict, ):
"""simple docstring"""
_lowercase =do_rescale if do_rescale is not None else self.do_rescale
_lowercase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase =do_pad if do_pad is not None else self.do_pad
_lowercase =pad_size if pad_size is not None else self.pad_size
_lowercase =make_list_of_images(snake_case)
if not valid_images(snake_case):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
_lowercase =[to_numpy_array(snake_case) for image in images]
if do_rescale:
_lowercase =[self.rescale(image=snake_case, scale=snake_case) for image in images]
if do_pad:
_lowercase =[self.pad(snake_case, size=snake_case) for image in images]
_lowercase =[to_channel_dimension_format(snake_case, snake_case) for image in images]
_lowercase ={'pixel_values': images}
return BatchFeature(data=snake_case, tensor_type=snake_case)
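# Note on the symmetric-pad sizing used in the pad method above (a sketch,
# not part of the processor API): `(old // size + 1) * size - old` always
# pads by at least one pixel, even when the side is already a multiple of
# `size`. Illustrative values:
#   old=500, size=8 -> pad 4  (500 + 4 = 504, a multiple of 8)
#   old=512, size=8 -> pad 8  (512 is already a multiple, yet 8 more is added)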
| 181
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Tuple ='''layoutlmv3'''
def __init__( self :Optional[int], snake_case :int=5_0265, snake_case :int=768, snake_case :Any=12, snake_case :Union[str, Any]=12, snake_case :List[str]=3072, snake_case :List[str]="gelu", snake_case :List[str]=0.1, snake_case :Optional[int]=0.1, snake_case :Optional[int]=512, snake_case :Tuple=2, snake_case :Optional[Any]=0.0_2, snake_case :Optional[int]=1e-5, snake_case :Union[str, Any]=1, snake_case :Dict=0, snake_case :Tuple=2, snake_case :Tuple=1024, snake_case :Optional[Any]=128, snake_case :Optional[Any]=128, snake_case :List[str]=True, snake_case :str=32, snake_case :Optional[int]=128, snake_case :Dict=64, snake_case :List[str]=256, snake_case :Optional[Any]=True, snake_case :Optional[Any]=True, snake_case :Dict=True, snake_case :Optional[Any]=224, snake_case :int=3, snake_case :int=16, snake_case :str=None, **snake_case :Dict, ):
"""simple docstring"""
super().__init__(
vocab_size=snake_case, hidden_size=snake_case, num_hidden_layers=snake_case, num_attention_heads=snake_case, intermediate_size=snake_case, hidden_act=snake_case, hidden_dropout_prob=snake_case, attention_probs_dropout_prob=snake_case, max_position_embeddings=snake_case, type_vocab_size=snake_case, initializer_range=snake_case, layer_norm_eps=snake_case, pad_token_id=snake_case, bos_token_id=snake_case, eos_token_id=snake_case, **snake_case, )
_lowercase =max_ad_position_embeddings
_lowercase =coordinate_size
_lowercase =shape_size
_lowercase =has_relative_attention_bias
_lowercase =rel_pos_bins
_lowercase =max_rel_pos
_lowercase =has_spatial_attention_bias
_lowercase =rel_ad_pos_bins
_lowercase =max_rel_ad_pos
_lowercase =text_embed
_lowercase =visual_embed
_lowercase =input_size
_lowercase =num_channels
_lowercase =patch_size
_lowercase =classifier_dropout
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] =version.parse('''1.12''' )
@property
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
])
@property
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
return 1e-5
@property
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
return 12
def UpperCamelCase__ ( self :List[Any], snake_case :"ProcessorMixin", snake_case :int = -1, snake_case :int = -1, snake_case :bool = False, snake_case :Optional["TensorType"] = None, snake_case :int = 3, snake_case :int = 40, snake_case :int = 40, ):
"""simple docstring"""
setattr(processor.image_processor, 'apply_ocr', snake_case)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase =compute_effective_axis_dimension(
snake_case, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase =processor.tokenizer.num_special_tokens_to_add(snake_case)
_lowercase =compute_effective_axis_dimension(
snake_case, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=snake_case)
# Generate dummy inputs according to compute batch and sequence
_lowercase =[[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
_lowercase =[[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_lowercase =self._generate_dummy_images(snake_case, snake_case, snake_case, snake_case)
_lowercase =dict(
processor(
snake_case, text=snake_case, boxes=snake_case, return_tensors=snake_case, ))
return inputs
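# Note on the dummy-input generation above (my paraphrase of the pattern, not
# new API): when batch or sequence is dynamic (-1), a small fixed size is
# substituted -- `default_fixed_batch` samples and `default_fixed_sequence`
# tokens minus the tokenizer's special tokens -- so the ONNX exporter cannot
# constant-fold the dynamic axes away; the dummy text is just the unk token
# repeated to that length, paired with one [x0, y0, x1, y1] box per example.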
| 181
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = '▁'
__UpperCamelCase : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model'}
__UpperCamelCase : List[Any] = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
__UpperCamelCase : List[Any] = {
'facebook/xglm-564M': 2048,
}
class _UpperCamelCase ( A ):
'''simple docstring'''
a_ : Dict = VOCAB_FILES_NAMES
a_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]="<s>" , _lowerCamelCase : int="</s>" , _lowerCamelCase : List[str]="</s>" , _lowerCamelCase : Optional[int]="<s>" , _lowerCamelCase : Any="<unk>" , _lowerCamelCase : Tuple="<pad>" , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__lowerCamelCase : str = 7
__lowerCamelCase : Any = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
__lowerCamelCase : List[Any] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
__lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
__lowerCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowerCamelCase : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
__lowerCamelCase : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
__lowerCamelCase : Optional[int] = len(self.sp_model )
__lowerCamelCase : Optional[int] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
'''simple docstring'''
__lowerCamelCase : Tuple = self.__dict__.copy()
__lowerCamelCase : int = None
__lowerCamelCase : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCamelCase : Union[str, Any] = {}
__lowerCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self : str , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__lowerCamelCase : Any = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _snake_case ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase ))
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase ))
def _snake_case ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowerCamelCase : Any = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self : str , _lowerCamelCase : str ):
'''simple docstring'''
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _snake_case ( self : Any , _lowerCamelCase : Tuple ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCamelCase : Tuple = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self : List[str] , _lowerCamelCase : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self : Optional[int] , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Dict = """""".join(_lowerCamelCase ).replace(_lowerCamelCase , """ """ ).strip()
return out_string
def _snake_case ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase : str = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
__lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
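# Standalone sketch of the fairseq-offset id mapping implemented above
# (illustrative helper; names and defaults are mine): ids 0-3 are pinned to
# <s>/<pad>/</s>/<unk>, every SentencePiece id is shifted by the offset, and
# a SentencePiece id of 0 (SentencePiece's own <unk>) maps to the unk id.
def _fairseq_style_token_to_id(token, pinned, sp_piece_to_id, offset=1, unk_id=3):
    if token in pinned:  # e.g. {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        return pinned[token]
    spm_id = sp_piece_to_id.get(token, 0)
    return spm_id + offset if spm_id else unk_id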
| 458
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Dict=1_0 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : Tuple=3_2 * 8 , _lowerCamelCase : int=3_2 * 8 , _lowerCamelCase : str=4 , _lowerCamelCase : List[Any]=6_4 , ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = parent
__lowerCamelCase : Optional[int] = batch_size
__lowerCamelCase : List[str] = is_training
__lowerCamelCase : Dict = use_auxiliary_loss
__lowerCamelCase : Optional[int] = num_queries
__lowerCamelCase : Optional[Any] = num_channels
__lowerCamelCase : Dict = min_size
__lowerCamelCase : Optional[Any] = max_size
__lowerCamelCase : str = num_labels
__lowerCamelCase : Optional[Any] = hidden_dim
__lowerCamelCase : Optional[Any] = hidden_dim
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCamelCase )
__lowerCamelCase : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
__lowerCamelCase : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
).float()
__lowerCamelCase : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
__lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase : List[Any] = self.num_queries
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : List[str] = [1, 1, 1, 1]
__lowerCamelCase : Optional[int] = self.num_channels
__lowerCamelCase : Optional[int] = 6_4
__lowerCamelCase : int = 1_2_8
__lowerCamelCase : Any = self.hidden_dim
__lowerCamelCase : List[str] = self.hidden_dim
__lowerCamelCase : List[Any] = self.hidden_dim
return config
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = self.prepare_config_and_inputs()
__lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _snake_case ( self : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Tuple = output.encoder_hidden_states
__lowerCamelCase : Any = output.pixel_decoder_hidden_states
__lowerCamelCase : Optional[Any] = output.transformer_decoder_hidden_states
# each hidden-state tuple should have one entry per backbone stage / decoder layer
self.parent.assertEqual(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(_lowerCamelCase ) , config.decoder_layers )
def _snake_case ( self : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : List[str]=False ):
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase : Any = MaskaFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowerCamelCase : Dict = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
__lowerCamelCase : Optional[Any] = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )
def _snake_case ( self : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ):
'''simple docstring'''
__lowerCamelCase : List[Any] = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(_lowerCamelCase : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
__lowerCamelCase : Tuple = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
__lowerCamelCase : str = model(
pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _UpperCamelCase ( A,A,unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
a_ : Union[str, Any] = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
a_ : Optional[Any] = False
a_ : int = False
a_ : List[str] = False
a_ : List[Any] = False
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Dict = MaskaFormerModelTester(self )
__lowerCamelCase : int = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def _snake_case ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def _snake_case ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def _snake_case ( self : str ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _snake_case ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Any ):
'''simple docstring'''
pass
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(_lowerCamelCase )
__lowerCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Any = [*signature.parameters.keys()]
__lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase : Dict = MaskaFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
__lowerCamelCase : Optional[Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_lowerCamelCase ),
"""mask_labels""": torch.randn((2, 1_0, *size) , device=_lowerCamelCase ),
"""class_labels""": torch.zeros(2 , 1_0 , device=_lowerCamelCase ).long(),
}
__lowerCamelCase : Any = self.model_tester.get_config()
__lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(_lowerCamelCase ).to(_lowerCamelCase )
__lowerCamelCase : int = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def _snake_case ( self : Any ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase : str = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
__lowerCamelCase : Tuple = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : List[Any] = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase : Optional[int] = True
__lowerCamelCase : Any = True
__lowerCamelCase : Any = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
__lowerCamelCase : Optional[Any] = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCamelCase : str = 1E-4
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _snake_case ( self : Dict ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase )
__lowerCamelCase : str = self.default_image_processor
__lowerCamelCase : Dict = prepare_img()
__lowerCamelCase : List[Any] = image_processor(_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
__lowerCamelCase : Optional[int] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
__lowerCamelCase : str = model(**_lowerCamelCase )
__lowerCamelCase : Dict = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
__lowerCamelCase : Union[str, Any] = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
__lowerCamelCase : Tuple = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
__lowerCamelCase : Any = self.default_image_processor
__lowerCamelCase : Union[str, Any] = prepare_img()
__lowerCamelCase : int = image_processor(_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
__lowerCamelCase : int = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
__lowerCamelCase : Dict = model(**_lowerCamelCase )
# masks_queries_logits
__lowerCamelCase : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase : Any = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
__lowerCamelCase : List[str] = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
__lowerCamelCase : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase : List[Any] = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
__lowerCamelCase : Dict = self.default_image_processor
__lowerCamelCase : List[Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , )
__lowerCamelCase : int = inputs["""pixel_values"""].to(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = [el.to(_lowerCamelCase ) for el in inputs["""mask_labels"""]]
__lowerCamelCase : Union[str, Any] = [el.to(_lowerCamelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__lowerCamelCase : Dict = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
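# Shape summary for the with-annotations forward pass above (descriptive, not
# executable test code): `pixel_values` is (batch, 3, H, W) with H and W padded
# to multiples of 32, each `mask_labels` entry is a float tensor of shape
# (num_instances, H', W'), and each `class_labels` entry is a (num_instances,)
# long tensor -- one list element per image, as produced by the image processor.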
| 458
| 1
|
def lowerCamelCase_ ( UpperCAmelCase__ ):
"""simple docstring"""
a_ = len(UpperCAmelCase__ )
for i in range(1 , UpperCAmelCase__ ):
a_ = collection[i]
a_ = 0
a_ = i - 1
while low <= high:
a_ = (low + high) // 2
if val < collection[mid]:
a_ = mid - 1
else:
a_ = mid + 1
for j in range(UpperCAmelCase__ , UpperCAmelCase__ , -1 ):
a_ = collection[j - 1]
a_ = val
return collection
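# Readable reconstruction of the function above (a sketch: the identifier
# names are mine, chosen to match the call in the __main__ block below, and
# the control flow mirrors the obfuscated original): insertion sort that
# finds each element's slot with binary search before shifting.
def binary_insertion_sort(collection: list) -> list:
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):  # shift the tail right by one slot
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
# e.g. binary_insertion_sort([5, 2, 4, 1]) == [1, 2, 4, 5]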
if __name__ == "__main__":
A_ : List[str] =input("""Enter numbers separated by a comma:\n""").strip()
A_ : str =[int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
| 483
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Tuple =logging.get_logger(__name__)
A_ : int ={
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowercase_ ( UpperCamelCase__):
"""simple docstring"""
snake_case_ = '''efficientformer'''
def __init__( self , _UpperCAmelCase = [3, 2, 6, 4] , _UpperCAmelCase = [48, 96, 224, 448] , _UpperCAmelCase = [True, True, True, True] , _UpperCAmelCase = 448 , _UpperCAmelCase = 32 , _UpperCAmelCase = 4 , _UpperCAmelCase = 7 , _UpperCAmelCase = 5 , _UpperCAmelCase = 8 , _UpperCAmelCase = 4 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 16 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 2 , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = 1e-5 , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 1e-1_2 , _UpperCAmelCase = 224 , _UpperCAmelCase = 1e-0_5 , **_UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = hidden_sizes
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = num_channels
a_ = depths
a_ = mlp_expansion_ratio
a_ = downsamples
a_ = dim
a_ = key_dim
a_ = attention_ratio
a_ = resolution
a_ = pool_size
a_ = downsample_patch_size
a_ = downsample_stride
a_ = downsample_pad
a_ = drop_path_rate
a_ = num_metaad_blocks
a_ = distillation
a_ = use_layer_scale
a_ = layer_scale_init_value
a_ = image_size
a_ = batch_norm_eps
| 483
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
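# Usage note (sketch): instantiating the shim above still works but emits the
# deprecation warning; new code should construct the replacement directly:
#
#   from transformers import SegformerImageProcessor
#   image_processor = SegformerImageProcessor()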
| 104
|
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = ['a', 'b', 'c']
# Defaults to last layer if both are None
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['c'] )
self.assertEqual(lowerCAmelCase , [2] )
# Out indices set to match out features
snake_case ,snake_case = get_aligned_output_features_output_indices(['a', 'c'] , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [0, 2] )
# Out features set to match out indices
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , [0, 2] , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [0, 2] )
# Out features selected from negative indices
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , [-3, -1] , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [-3, -1] )
def snake_case ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , lowerCAmelCase )
# Out features must be a list
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(lowerCAmelCase , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(lowerCAmelCase , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def snake_case ( self ):
"""simple docstring"""
snake_case = BackboneMixin()
snake_case = ['a', 'b', 'c']
snake_case = ['a', 'c']
snake_case = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
snake_case = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
snake_case = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
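# Sketch of the alignment rule the tests above exercise (my paraphrase, not
# the library implementation): derive whichever of out_features/out_indices
# is missing from the other, defaulting to the last stage when both are None.
def _align(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return out_features, [stage_names.index(f) for f in out_features]
    return out_features, out_indices
# e.g. _align(None, None, ["a", "b", "c"]) == (["c"], [2])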
| 104
| 1
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __A ):
__SCREAMING_SNAKE_CASE = ''''''
__SCREAMING_SNAKE_CASE = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self : Optional[int] , a_ : Optional[Any] = None , a_ : Tuple = None , **a_ : int , ):
"""simple docstring"""
super().__init__(self , **UpperCamelCase__ )
__snake_case = repo_info
__snake_case = token
__snake_case = None
def A ( self : List[str] ):
"""simple docstring"""
if self.dir_cache is None:
__snake_case = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__snake_case = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCamelCase__ ): {"name": str(UpperCamelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self : Dict , a_ : Tuple , a_ : str = "rb" , **a_ : Optional[Any] , ):
"""simple docstring"""
if not isinstance(self.repo_info , UpperCamelCase__ ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__snake_case = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha )
return fsspec.open(
UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def A ( self : Dict , a_ : List[str] , **a_ : List[str] ):
"""simple docstring"""
self._get_dirs()
__snake_case = self._strip_protocol(UpperCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCamelCase__ )
def A ( self : str , a_ : Dict , a_ : List[Any]=False , **a_ : List[Any] ):
"""simple docstring"""
self._get_dirs()
__snake_case = PurePosixPath(path.strip("/" ) )
__snake_case = {}
for p, f in self.dir_cache.items():
__snake_case = PurePosixPath(p.strip("/" ) )
__snake_case = p.parent
if root == path:
__snake_case = f
__snake_case = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 69
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
__lowerCAmelCase = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def __UpperCamelCase ( lowercase_ : int ):
"""simple docstring"""
a_ = torch.load(lowercase_ , map_location='cpu' )
return sd
def __UpperCamelCase ( lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Union[str, Any]=rename_keys_prefix ):
"""simple docstring"""
a_ = OrderedDict()
a_ = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
a_ = key
for name_pair in rename_keys_prefix:
a_ = new_key.replace(name_pair[0] , name_pair[1] )
a_ = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it was added separately
a_ = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def __UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ):
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
a_ = 'pretraining'
if "vcr" in checkpoint_path:
a_ = {'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
a_ = {'visual_embedding_dim': 2_048}
elif "vqa" in checkpoint_path:
a_ = {'visual_embedding_dim': 2_048}
elif "nlvr" in checkpoint_path:
a_ = {'visual_embedding_dim': 1_024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
a_ = {'visual_embedding_dim': 512}
a_ = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
a_ = {'visual_embedding_dim': 2_048}
a_ = 'vqa_advanced'
elif "vqa" in checkpoint_path:
a_ = {'visual_embedding_dim': 2_048, 'num_labels': 3_129}
a_ = 'vqa'
elif "nlvr" in checkpoint_path:
a_ = {
'visual_embedding_dim': 1_024,
'num_labels': 2,
}
a_ = 'nlvr'
a_ = VisualBertConfig(**lowercase_ )
# Load State Dict
a_ = load_state_dict(lowercase_ )
a_ = get_new_dict(lowercase_ , lowercase_ )
if model_type == "pretraining":
a_ = VisualBertForPreTraining(lowercase_ )
elif model_type == "vqa":
a_ = VisualBertForQuestionAnswering(lowercase_ )
elif model_type == "nlvr":
a_ = VisualBertForVisualReasoning(lowercase_ )
elif model_type == "multichoice":
a_ = VisualBertForMultipleChoice(lowercase_ )
model.load_state_dict(lowercase_ )
# Save Checkpoints
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
__lowerCAmelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
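# Example invocation (the script name is a placeholder; the checkpoint file
# must be one of ACCEPTABLE_CHECKPOINTS above):
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visual_bert_vqa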
| 536
| 0
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCamelCase ( __lowerCamelCase : Dict ) -> Tuple: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
_a = [1, 2, 3]
with pytest.raises(__lowerCamelCase ):
with parallel_backend("unsupported backend" ):
map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=2 )
with pytest.raises(__lowerCamelCase ):
with parallel_backend("unsupported backend" ):
map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCamelCase ( __lowerCamelCase : Dict ) -> int:
'''simple docstring'''
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
assert map_nested(__lowerCamelCase , __lowerCamelCase , num_proc=__lowerCamelCase ) == expected_map_nested_sa
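# Sketch of what the assertions above verify: map_nested applies the add-one
# helper at the top of this file to every leaf of nested lists/dicts,
# optionally fanning out over num_proc workers under the "spark" backend, e.g.
#   {"a": [1, 2], "b": [3, 4]}  ->  {"a": [2, 3], "b": [4, 5]}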
| 276
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Dict=1024 ) -> str:
'''simple docstring'''
_a , _a = [], []
_a = list(zip(__lowerCamelCase , __lowerCamelCase ) )
_a , _a = sorted_examples[0]
def is_too_big(__lowerCamelCase : Union[str, Any] ):
return tok(__lowerCamelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
_a = new_src + " " + src
_a = new_tgt + " " + tgt
if is_too_big(__lowerCamelCase ) or is_too_big(__lowerCamelCase ): # can't fit, finalize the example
finished_src.append(__lowerCamelCase )
finished_tgt.append(__lowerCamelCase )
_a , _a = src, tgt
else: # can fit, keep adding
_a , _a = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(__lowerCamelCase )
finished_tgt.append(__lowerCamelCase )
return finished_src, finished_tgt
def __UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Path , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_a = Path(__lowerCamelCase )
save_path.mkdir(exist_ok=__lowerCamelCase )
for split in ["train"]:
_a , _a = data_dir / F"{split}.source", data_dir / F"{split}.target"
_a = [x.rstrip() for x in Path(__lowerCamelCase ).open().readlines()]
_a = [x.rstrip() for x in Path(__lowerCamelCase ).open().readlines()]
_a , _a = pack_examples(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
print(F"packed {split} split from {len(__lowerCamelCase )} examples -> {len(__lowerCamelCase )}." )
Path(save_path / F"{split}.source" ).open("w" ).write("\n".join(__lowerCamelCase ) )
Path(save_path / F"{split}.target" ).open("w" ).write("\n".join(__lowerCamelCase ) )
for split in ["val", "test"]:
_a , _a = data_dir / F"{split}.source", data_dir / F"{split}.target"
shutil.copyfile(__lowerCamelCase , save_path / F"{split}.source" )
shutil.copyfile(__lowerCamelCase , save_path / F"{split}.target" )
def __UpperCamelCase ( ) -> Any:
'''simple docstring'''
_a = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=__lowerCamelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=__lowerCamelCase , default=128 )
parser.add_argument("--data_dir" , type=__lowerCamelCase )
parser.add_argument("--save_path" , type=__lowerCamelCase )
_a = parser.parse_args()
_a = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
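# Example invocation (the script name and paths are placeholders):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 256 --data_dir ./cnn_dm --save_path ./cnn_dm_packed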
| 276
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = StableUnCLIPPipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCAmelCase = False
def __a ( self : List[str] ):
A = 32
A = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
A = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
A = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_000 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
A = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
A = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
A = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
A = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='v_prediction' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
A = AutoencoderKL()
A = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def __a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Tuple=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __a ( self : str ):
A = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def __a ( self : Optional[int] ):
A = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : str ):
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
A = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A = torch.Generator(device='cpu' ).manual_seed(0 )
A = pipe('anime turle' , generator=_lowercase , output_type='np' )
A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def __a ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
A = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
A = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 690
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCamelCase : Any = {"mobilebert-uncased": 512}
UpperCamelCase : Any = {}
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Optional[int]=True , _lowercase : int="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Any="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : Any=None , **_lowercase : Optional[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
A = getattr(_lowercase , normalizer_state.pop('type' ) )
A = do_lower_case
A = strip_accents
A = tokenize_chinese_chars
A = normalizer_class(**_lowercase )
A = do_lower_case
def __a ( self : List[Any] , _lowercase : Tuple , _lowercase : Any=None ):
A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
A = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
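

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module; requires the
    # `google/mobilebert-uncased` checkpoint to be downloadable): encoding a
    # sentence pair shows how the special-token and token-type methods above combine.
    tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    enc = tok("hello world", "second segment")
    # token_type_ids are 0 over "[CLS] A [SEP]" and 1 over "B [SEP]", matching
    # create_token_type_ids_from_sequences above.
    print(enc["input_ids"])
    print(enc["token_type_ids"])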
| 690
| 1
|
"""Testing suite for the PyTorch BioGPT model."""

import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 287
|
"""BridgeTower import structure (lazily loaded module)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
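
# Hedged note (illustrative, not part of the original module): with the lazy
# structure above, a submodule is only imported on first attribute access, e.g.
#
#     from transformers.models.bridgetower import BridgeTowerConfig
#
# triggers the import of configuration_bridgetower, while the torch-dependent
# modeling classes stay untouched until they are actually requested.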
| 287
| 1
|
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times, adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
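#
# As a quick sanity check, the same expansion can be reproduced in Python
# (an illustrative sketch of the expansion main() below performs):
#
#    import itertools
#    dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#    [" ".join(v).strip() for v in itertools.product(*dims)]
#    # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#    #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']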
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to set each variation explicitly:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g. 3 times via --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it, as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
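#
# Worked example with the table above: taking --tf32 0 (285.11 samples/sec) as the
# baseline, the --tf32 1 row's Diff % is round(100 * (342.09 - 285.11) / 285.11) = 20.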
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` chars."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
| 102
|
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation function name to the corresponding `nn.Module`."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
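

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original helper): resolve an
    # activation module by name and apply it to a tensor.
    import torch

    act = get_activation("gelu")  # -> nn.GELU()
    print(act(torch.zeros(2)))  # tensor([0., 0.])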
| 474
| 0
|
"""Feature extractor class for SpeechT5."""

import warnings
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(self, feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=False, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", frame_signal_scale=1.0, fmin=80, fmax=7600, mel_floor=1e-10, reduction_factor=2, return_attention_mask=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize every array in the list to zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
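
    # Worked example (illustrative): for vector = [1.0, 3.0, 0.0] with attended
    # length 2, the mean over the first two samples is 2.0 and the variance is 1.0,
    # so the normalized slice is ~[-1.0, 1.0] (up to the 1e-7 stabilizer) and the
    # padded third position is reset to `padding_value`.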
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(self, speech, is_target=False, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, **kwargs) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
| 718
|
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def _lowercase ( self ):
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.25 , width=0.25 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("CPU" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(4 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("GPU" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("Model" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
rect.set_stroke(UpperCAmelCase_ )
snake_case_ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCAmelCase_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCAmelCase_ , buff=0.0 )
self.add(UpperCAmelCase_ )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ , *UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("Loaded Checkpoint" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
snake_case_ = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.7 )
target.move_to(UpperCAmelCase_ )
ckpt_arr.append(UpperCAmelCase_ )
snake_case_ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
snake_case_ = MarkupText(
f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("Disk" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , Write(UpperCAmelCase_ , run_time=1 ) , Create(UpperCAmelCase_ , run_time=1 ) )
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
snake_case_ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase_ , run_time=1.5 ) )
self.play(*UpperCAmelCase_ )
self.play(FadeOut(UpperCAmelCase_ ) )
snake_case_ = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ , *UpperCAmelCase_ ) , )
self.wait()
| 420
| 0
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=2 , _snake_case=8 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=16 , _snake_case=5 , _snake_case=2 , _snake_case=36 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
_UpperCAmelCase =parent
_UpperCAmelCase =batch_size
_UpperCAmelCase =seq_length
_UpperCAmelCase =is_training
_UpperCAmelCase =use_input_mask
_UpperCAmelCase =use_token_type_ids
_UpperCAmelCase =use_labels
_UpperCAmelCase =vocab_size
_UpperCAmelCase =hidden_size
_UpperCAmelCase =num_hidden_layers
_UpperCAmelCase =num_attention_heads
_UpperCAmelCase =intermediate_size
_UpperCAmelCase =hidden_act
_UpperCAmelCase =hidden_dropout_prob
_UpperCAmelCase =attention_probs_dropout_prob
_UpperCAmelCase =max_position_embeddings
_UpperCAmelCase =type_vocab_size
_UpperCAmelCase =type_sequence_label_size
_UpperCAmelCase =initializer_range
_UpperCAmelCase =num_labels
_UpperCAmelCase =num_choices
_UpperCAmelCase =scope
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase =None
if self.use_input_mask:
_UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase =None
if self.use_token_type_ids:
_UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase =None
_UpperCAmelCase =None
_UpperCAmelCase =None
if self.use_labels:
_UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase =ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_config()
_UpperCAmelCase =300
return config
def SCREAMING_SNAKE_CASE ( self ):
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) =self.prepare_config_and_inputs()
_UpperCAmelCase =True
_UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
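
# A minimal sketch of running MRA outside the test harness. The checkpoint id comes from
# the integration tests above; the rest is an illustrative assumption, not part of the suite.
#
#   import torch
#   from transformers import MraModel
#
#   model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#   input_ids = torch.arange(256).unsqueeze(0)  # toy batch: one sequence of 256 token ids
#   with torch.no_grad():
#       hidden_states = model(input_ids).last_hidden_state  # shape (1, 256, 768)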
| 408
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
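
# A minimal usage sketch for the SAG pipeline. The checkpoint id and the
# guidance_scale/sag_scale/num_inference_steps values mirror the tests above; the prompt
# and the CUDA placement are illustrative assumptions.
#
#   import torch
#   from diffusers import StableDiffusionSAGPipeline
#
#   sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   generator = torch.manual_seed(0)
#   image = sag_pipe(
#       "a photo of an astronaut", generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20
#   ).images[0]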
| 408
| 1
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
A : str = """docs/source/en/_toctree.yml"""
def snake_case_ ( a__ : Tuple ):
"""simple docstring"""
__lowercase = defaultdict(a__ )
__lowercase = []
__lowercase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(a__ )
__lowercase = new_doc_list
__lowercase = [key for key, value in counts.items() if value > 1]
__lowercase = []
for duplicate_key in duplicates:
__lowercase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(a__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
__lowercase = sorted(a__ ,key=lambda a__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(a__ ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(a__ )
# Sort
return overview_doc
def check_scheduler_doc(overwrite=False):
    """Checks (and optionally fixes) the ordering of the scheduler docs in the table of content."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.")
def check_pipeline_doc(overwrite=False):
    """Checks (and optionally fixes) the ordering of the pipeline docs in the table of content."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
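
# For reference, clean_doc_toc operates on the list of dicts that yaml.safe_load produces
# for one toctree section. A hypothetical input/output pair (entries invented for
# illustration):
#
#   docs = [{"local": "api/schedulers/ddpm", "title": "DDPM"},
#           {"local": "api/schedulers/ddim", "title": "DDIM"}]
#   clean_doc_toc(docs)
#   # -> [{"local": "api/schedulers/ddim", "title": "DDIM"},
#   #     {"local": "api/schedulers/ddpm", "title": "DDPM"}]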
| 163
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of a weighted, undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connects the 1-indexed vertices `a` and `b` of `graph` with an edge of weight `edge`."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex):
    """Prim's algorithm with a plain list; returns the MST edges as (vertex, parent) pairs."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex):
    """Prim's algorithm with a binary heap; yields the MST edges as (vertex, parent) pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    """Entry point reserved for doctest-style checks of prim and prim_heap."""
if __name__ == "__main__":
import doctest
doctest.testmod()
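
# A small usage sketch (the graph itself is illustrative): three vertices, three weighted
# edges, and the MST from both implementations.
#
#   graph = [Vertex(x) for x in range(3)]    # vertices with ids "0", "1", "2"
#   connect(graph, 1, 2, 15)                 # connect() takes 1-indexed endpoints
#   connect(graph, 1, 3, 12)
#   connect(graph, 2, 3, 13)
#   print(prim(graph, graph[0]))             # -> [(2, 3), (3, 1)]
#   print(list(prim_heap(graph, graph[0])))  # same MST, heap-based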
| 163
| 1
|
"""simple docstring"""
def hamming(n_element: int) -> list:
    """Returns the first n_element terms of the Hamming number series (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(F"""The list of the first {n} Hamming numbers is: {hamming_numbers}""")
print("-----------------------------------------------------")
| 155
|
"""simple docstring"""
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shifts the binary string of `number` left by `shift_amount`, filling with zeros on the right."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shifts the binary string of `number` right by `shift_amount`, dropping bits on the right."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shifts the binary string of `number` right by `shift_amount`, replicating the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
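
# Quick sanity checks, derived by tracing the functions above:
#   logical_left_shift(1, 1)       -> "0b10"
#   logical_right_shift(1024, 2)   -> "0b100000000"
#   arithmetic_right_shift(-8, 1)  -> "0b11100"   # the sign bit is replicated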
| 155
| 1
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self ):
torch.manual_seed(0 )
__UpperCamelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
__UpperCamelCase : Any = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
__UpperCamelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
__UpperCamelCase : List[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCamelCase : str = CLIPTextModel(_lowerCamelCase )
__UpperCamelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : List[str] = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ):
if str(_lowerCamelCase ).startswith('mps' ):
__UpperCamelCase : Optional[Any] = torch.manual_seed(_lowerCamelCase )
else:
__UpperCamelCase : Tuple = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
__UpperCamelCase : str = 2
__UpperCamelCase : Optional[Any] = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=_lowerCamelCase , device=torch.device(_lowerCamelCase ) , )
__UpperCamelCase : str = floats_tensor(control_image.shape , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
__UpperCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : int = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('RGB' ).resize((6_4, 6_4) )
__UpperCamelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def lowerCAmelCase ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCAmelCase ( self ):
torch.manual_seed(0 )
__UpperCamelCase : str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)
__UpperCamelCase : Any = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(_lowerCamelCase )
torch.manual_seed(0 )
__UpperCamelCase : str = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(_lowerCamelCase )
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCamelCase : Tuple = CLIPTextModel(_lowerCamelCase )
__UpperCamelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : List[str] = MultiControlNetModel([controlneta, controlneta] )
__UpperCamelCase : Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ):
if str(_lowerCamelCase ).startswith('mps' ):
__UpperCamelCase : Optional[Any] = torch.manual_seed(_lowerCamelCase )
else:
__UpperCamelCase : Dict = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
__UpperCamelCase : Optional[Any] = 2
__UpperCamelCase : Optional[int] = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=_lowerCamelCase , device=torch.device(_lowerCamelCase ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=_lowerCamelCase , device=torch.device(_lowerCamelCase ) , ),
]
__UpperCamelCase : Tuple = floats_tensor(control_image[0].shape , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
__UpperCamelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : Optional[int] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('RGB' ).resize((6_4, 6_4) )
__UpperCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
def lowerCAmelCase ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def lowerCAmelCase ( self ):
__UpperCamelCase : Dict = self.get_dummy_components()
__UpperCamelCase : int = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_lowerCamelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
| 287
|
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from the PyTorch checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location='cpu')
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save the TensorFlow model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format='h5')
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False, ):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print('=' * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print('=' * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print('-' * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f" Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f" Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f" Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}")
            print('-' * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = 'converted_model'

            convert_pt_checkpoint_to_tf(
                model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + '-tf_model.h5'), compare_with_pt_model=compare_with_pt_model, )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
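
# Example invocation (the module name is assumed; the flags are exactly the ones defined
# by the argparse block above):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased \
#       --tf_dump_path ./tf_models \
#       --compare_with_pt_model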
| 287
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
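
# A minimal usage sketch; the small values are illustrative, the defaults above match the
# original CTRL checkpoint:
#
#   config = CTRLConfig(n_layer=2, n_head=2, n_embd=64)
#   print(config.hidden_size)  # -> 64, resolved through attribute_map to n_embd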
| 524
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
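
# This script is written for exactly two processes (see the assert in create_accelerator
# above). A typical launch command (file name and flags are illustrative):
#
#   accelerate launch --num_processes 2 test_even_batches.py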
| 524
| 1
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors='pt').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / F'{split}.source', data_dir / F'{split}.target'
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(F'packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.')
        Path(save_path / F'{split}.source').open('w').write('\n'.join(packed_src))
        Path(save_path / F'{split}.target').open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F'{split}.source', data_dir / F'{split}.target'
        shutil.copyfile(src_path, save_path / F'{split}.source')
        shutil.copyfile(tgt_path, save_path / F'{split}.target')


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
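
# Example invocation (the script name is assumed; the flags are the ones defined in
# packer_cli above):
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed --max_seq_len 512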
| 716
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """Configuration class for the Decision Transformer model (GPT-2 backbone)."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
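# Hedged usage sketch (not part of the original file): instantiating the config
# above with defaults and overriding a few fields, as one would before building
# a DecisionTransformerModel.
if __name__ == "__main__":
    cfg = DecisionTransformerConfig(state_dim=11, act_dim=3, n_layer=4)
    print(cfg.model_type, cfg.n_positions)  # decision_transformer 1024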
| 327
| 0
|
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 4
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
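# Hedged sketch of the lazy-import mechanism used above (simplified; the real
# transformers._LazyModule has more machinery). Attribute access looks the name
# up in the import structure and only then imports the submodule:
#
#     import importlib, types
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(attr)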
| 28
| 0
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens (None if too short)."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric boundaries."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index; record the cluster if it is a near-duplicate of an existing key."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find near-duplicate clusters in the dataset using MinHash + LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity between the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
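# Hedged worked example of jaccard_similarity above:
# get_tokens("a=b+1") == {"a", "b", "1"} and get_tokens("a = b + 2") == {"a", "b", "2"},
# so the intersection has 2 tokens, the union has 4, and the similarity is 0.5.
assert jaccard_similarity("a=b+1", "a = b + 2") == 0.5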
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files in a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Near-deduplicate a dataset, keeping one extreme per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"""Original dataset size: {len(dataset)}""")
    print(f"""Number of duplicate clusters: {len(duplicate_clusters)}""")
    print(f"""Files in duplicate cluster: {len(duplicate_indices)}""")
    print(f"""Unique files in duplicate cluster: {len(extreme_dict)}""")
    print(f"""Filtered dataset size: {len(ds_filter)}""")
    return ds_filter, duplicate_clusters
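# Hedged usage sketch (toy data; column names match what the helpers above read).
# Note MIN_NUM_TOKENS means very short snippets are never hashed, so tiny inputs
# mostly exercise the plumbing rather than finding real duplicates.
if __name__ == "__main__":
    toy = Dataset.from_dict(
        {
            "content": ["def f(x):\n    y = x + 1\n    z = y * 2\n    return z  # simple helper" for _ in range(3)],
            "repo_name": ["r1", "r2", "r3"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    filtered, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(filtered), len(clusters))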
| 710
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654
| 0
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of ``value``, or its derivative form when ``deriv`` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
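# Hedged sanity checks for the function above: sigmoid(0) = 0.5, and the
# derivative form value * (1 - value) evaluates to 0.25 at value = 0.5.
assert sigmoid_function(0) == 0.5
assert sigmoid_function(0.5, deriv=True) == 0.25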
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 235
|
def check_cycle(graph: dict) -> bool:
    # keep track of all the visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
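# Hedged examples for the cycle check above: a 3-node graph with a back edge
# 2 -> 0 contains a cycle, while the same graph without that edge does not.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1], 1: [2], 2: []}) is False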
if __name__ == "__main__":
from doctest import testmod
testmod()
| 235
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Tuple = GPTaTokenizer
a_ : Optional[int] = GPTaTokenizerFast
a_ : str = True
a_ : Union[str, Any] = {"""add_prefix_space""": True}
a_ : List[str] = False
def UpperCAmelCase__ ( self) ->Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
a_ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase))))
a_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a_ = {"unk_token": "<unk>"}
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(__UpperCAmelCase) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(__UpperCAmelCase))
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Dict:
kwargs.update(self.special_tokens_map)
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->str:
kwargs.update(self.special_tokens_map)
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
a_ = "lower newer"
a_ = "lower newer"
return input_text, output_text
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a_ = "lower newer"
a_ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
a_ = tokenizer.tokenize(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a_ = tokens + [tokenizer.unk_token]
a_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer(add_prefix_space=__UpperCAmelCase)
a_ = "lower newer"
# Testing tokenization
a_ = tokenizer.tokenize(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = rust_tokenizer.tokenize(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
# Testing conversion to ids without special tokens
a_ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
# Testing conversion to ids with special tokens
a_ = self.get_rust_tokenizer(add_prefix_space=__UpperCAmelCase)
a_ = tokenizer.encode(__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = rust_tokenizer.encode(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
# Testing the unknown token
a_ = tokens + [rust_tokenizer.unk_token]
a_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , __UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Dict:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def UpperCAmelCase__ ( self , __UpperCAmelCase=15) ->List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
a_ = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase)
# Simple input
a_ = "This is a simple input"
a_ = ["This is a simple input 1", "This is a simple input 2"]
a_ = ("This is a simple input", "This is a pair")
a_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length")
# Simple input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length")
# Simple input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length")
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length")
# Pair input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding="max_length" , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>")
# Simple input
a_ = "This is a simple input"
a_ = ["This is a simple input looooooooong", "This is a simple input"]
a_ = ("This is a simple input", "This is a pair")
a_ = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
a_ = tokenizer.pad_token_id
a_ = tokenizer(__UpperCAmelCase , padding="max_length" , max_length=30 , return_tensors="np")
a_ = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncate=__UpperCAmelCase , return_tensors="np")
a_ = tokenizer(*__UpperCAmelCase , padding="max_length" , max_length=60 , return_tensors="np")
a_ = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncate=__UpperCAmelCase , return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30)
self.assertTrue(pad_token_id in out_s["input_ids"])
self.assertTrue(0 in out_s["attention_mask"])
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0])
self.assertFalse(0 in out_sa["attention_mask"][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1])
self.assertTrue(0 in out_sa["attention_mask"][1])
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60)
self.assertTrue(pad_token_id in out_p["input_ids"])
self.assertTrue(0 in out_p["attention_mask"])
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0])
self.assertFalse(0 in out_pa["attention_mask"][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1])
self.assertTrue(0 in out_pa["attention_mask"][1])
def UpperCAmelCase__ ( self) ->Tuple:
a_ = "$$$"
a_ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__UpperCAmelCase , add_bos_token=__UpperCAmelCase)
a_ = "This is a simple input"
a_ = ["This is a simple input 1", "This is a simple input 2"]
a_ = tokenizer.bos_token_id
a_ = tokenizer(__UpperCAmelCase)
a_ = tokenizer(__UpperCAmelCase)
self.assertEqual(out_s.input_ids[0] , __UpperCAmelCase)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
a_ = tokenizer.decode(out_s.input_ids)
a_ = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , __UpperCAmelCase)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
def UpperCAmelCase__ ( self) ->str:
pass
def UpperCAmelCase__ ( self) ->Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
a_ = [self.get_tokenizer(do_lower_case=__UpperCAmelCase , add_bos_token=__UpperCAmelCase)]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}'''):
a_ = "Encode this."
a_ = "This one too please."
a_ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
encoded_sequence += tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
a_ = tokenizer.encode_plus(
__UpperCAmelCase , __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , )
a_ = encoded_sequence_dict["input_ids"]
a_ = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__UpperCAmelCase) , len(__UpperCAmelCase))
a_ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__UpperCAmelCase)
]
a_ = [x for x in filtered_sequence if x is not None]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
@require_tokenizers
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->int:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
a_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__UpperCAmelCase)
a_ = "A photo of a cat"
a_ = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58])
tokenizer.save_pretrained("test_opt")
a_ = AutoTokenizer.from_pretrained("./test_opt")
a_ = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58])
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__UpperCAmelCase)
a_ = "A photo of a cat"
a_ = tokenizer.encode(
__UpperCAmelCase , )
# Same as above
self.assertEqual(__UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58])
@unittest.skip("This test is failing because of a bug in the fast tokenizer")
def UpperCAmelCase__ ( self) ->List[str]:
a_ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__UpperCAmelCase)
a_ = "bos"
a_ = tokenizer.get_vocab()["bos"]
a_ = "A photo of a cat"
a_ = tokenizer.encode(
__UpperCAmelCase , )
# We changed the bos token
self.assertEqual(__UpperCAmelCase , [3_19_57, 2_50, 13_45, 9, 10, 47_58])
tokenizer.save_pretrained("./tok")
a_ = AutoTokenizer.from_pretrained("./tok")
self.assertTrue(tokenizer.is_fast)
a_ = tokenizer.encode(
__UpperCAmelCase , )
self.assertEqual(__UpperCAmelCase , [3_19_57, 2_50, 13_45, 9, 10, 47_58])
| 713
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = "hf-internal-testing/tiny-random-t5"
a_ = AutoTokenizer.from_pretrained(__UpperCAmelCase)
a_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase)
a_ = tokenizer("This is me" , return_tensors="pt")
a_ = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
a_ = model.generate(**__UpperCAmelCase)
a_ = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase)
a_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase)
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
a_ = model_reloaded.generate(**__UpperCAmelCase)
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase))
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = "hf-internal-testing/tiny-random-t5"
a_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase)
a_ = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__UpperCAmelCase):
model.save_pretrained(__UpperCAmelCase)
a_ = model.reverse_bettertransformer()
model.save_pretrained(__UpperCAmelCase)
| 210
| 0
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=99 , _lowerCAmelCase=0 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase="last" , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=0 , ) -> Any:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_lengths
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = gelu_activation
_lowerCAmelCase = sinusoidal_embeddings
_lowerCAmelCase = causal
_lowerCAmelCase = asm
_lowerCAmelCase = n_langs
_lowerCAmelCase = vocab_size
_lowerCAmelCase = n_special
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = summary_type
_lowerCAmelCase = use_proj
_lowerCAmelCase = scope
_lowerCAmelCase = bos_token_id
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_input_lengths:
_lowerCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self ) -> List[str]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Dict:
_lowerCAmelCase = XLMModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , lengths=_lowerCAmelCase , langs=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , langs=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Dict:
_lowerCAmelCase = XLMWithLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> str:
_lowerCAmelCase = XLMForQuestionAnsweringSimple(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase )
_lowerCAmelCase = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Tuple:
_lowerCAmelCase = XLMForQuestionAnswering(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
_lowerCAmelCase = model(
_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , cls_index=_lowerCAmelCase , is_impossible=_lowerCAmelCase , p_mask=_lowerCAmelCase , )
_lowerCAmelCase = model(
_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , cls_index=_lowerCAmelCase , is_impossible=_lowerCAmelCase , )
((_lowerCAmelCase) , ) = result_with_labels.to_tuple()
_lowerCAmelCase = model(_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase )
((_lowerCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Union[str, Any]:
_lowerCAmelCase = XLMForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> int:
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = XLMForTokenClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Union[str, Any]:
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = XLMForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = config_and_inputs
_lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase : Tuple = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__lowerCamelCase : List[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = XLMModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , emb_dim=37 )
def _snake_case ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_lowerCAmelCase )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_lowerCAmelCase )
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1 ) -> int:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(
[isinstance(_lowerCAmelCase , _lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(_lowerCAmelCase ) )
self.assertEqual(len(_lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_lowerCAmelCase ):
# adds PAD dummy token
_lowerCAmelCase = min_length + idx + 1
_lowerCAmelCase = min_length + idx + 1
_lowerCAmelCase = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_lowerCAmelCase ) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=1 ) -> Union[str, Any]:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(
[isinstance(_lowerCAmelCase , _lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(_lowerCAmelCase ) , )
self.assertEqual(len(_lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_lowerCAmelCase ):
# adds PAD dummy token
_lowerCAmelCase = min_length + idx + 1
_lowerCAmelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_lowerCAmelCase ) , )
pass
@slow
def _snake_case ( self ) -> str:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = XLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(_lowerCAmelCase )
_lowerCAmelCase = torch.tensor([[14, 447]] , dtype=torch.long , device=_lowerCAmelCase ) # the president
_lowerCAmelCase = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_lowerCAmelCase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _lowerCAmelCase )
| 18
|
import argparse
import json
from tqdm import tqdm
def main():
    """Parse raw DPR retrieval data into an evaluation set and a gold data file."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
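# Hedged usage sketch (output file names are illustrative, not from the original script):
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set nq_dev.questions \
#       --gold_data_path nq_dev.gold_data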
| 385
| 0
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))
        # update weight: W += alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
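# Hedged usage sketch (paths and model id are illustrative):
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged-pipeline \
#       --alpha 0.75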
| 195
|
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
    def run(self):
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
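# Hedged usage note: once registered through register_subcommand above, this
# command is typically reached via the diffusers CLI entry point, e.g.
#   diffusers-cli env
# which calls run() and prints the environment table built by format_dict.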
| 195
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    """Build a Swinv2Config from a timm-style checkpoint name."""
    config = Swinv2Config()
    name_split = swinv2_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
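# Hedged worked example of the name parsing above, assuming a timm-style name
# without a "patch" segment, e.g. "swinv2_tiny_window8_256":
# name_split = ["swinv2", "tiny", "window8", "256"], so model_size = "tiny",
# window_size = int("window8"[6:]) = 8, and img_size = int("256") = 256.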
def rename_key(name):
    """Map a timm Swin V2 parameter name to its Hugging Face equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Rename keys and split fused qkv tensors into separate query/key/value entries."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()
    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name), organization="nandwalritik", commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
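# Hedged usage sketch (the checkpoint name is the script's default; the output
# directory is illustrative):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256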
| 44
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
A = logging.getLogger()
A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), 'w') as f:
                    f.write(content)

    def _run_finetune(self, gpus, distributed_retriever="pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multi_gpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multi_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 187
| 0
|
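The harness above follows a standard examples-test pattern: write a tiny dataset into a temp directory, assemble the training command line as a multi-line f-string, split it into argv, and run the example script as a subprocess. A stripped-down sketch of the same pattern using only the standard library; execute_subprocess_async is a transformers test helper, so plain subprocess.run stands in for it here, and script_path is a placeholder:

import subprocess
import sys
import tempfile
from pathlib import Path

def run_example(script_path):
    with tempfile.TemporaryDirectory() as tmp_dir:
        output_dir = Path(tmp_dir, 'output')
        testargs = f"""
            --output_dir {output_dir} \
            --num_train_epochs 1 \
            --train_batch_size 2 \
        """.split()
        # check=True turns a non-zero exit code into CalledProcessError,
        # which is what makes a subprocess failure fail the test.
        subprocess.run([sys.executable, script_path] + testargs, check=True)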
"""simple docstring"""
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)
    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError
    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Differentiate `func` at `position` to the given `order` using dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4

print(differentiate(f, 9, 2))
| 718
|
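The class above supports arbitrary order, but the idea is easiest to see at first order: with an infinitesimal ε satisfying ε² = 0, evaluating f(x + ε) yields f(x) + f′(x)·ε, so the ε-coefficient is exactly the derivative. A minimal sketch of that first-order case, independent of the Dual class above:

from dataclasses import dataclass

@dataclass
class TinyDual:
    real: float
    eps: float = 0.0  # coefficient of ε

    def __add__(self, other):
        o = other if isinstance(other, TinyDual) else TinyDual(other)
        return TinyDual(self.real + o.real, self.eps + o.eps)

    __radd__ = __add__

    def __mul__(self, other):
        o = other if isinstance(other, TinyDual) else TinyDual(other)
        # (a + bε)(c + dε) = ac + (ad + bc)ε, because ε² = 0
        return TinyDual(self.real * o.real, self.real * o.eps + self.eps * o.real)

    __rmul__ = __mul__

def derivative(f, x):
    return f(TinyDual(x, 1.0)).eps  # seed ε with coefficient 1

assert derivative(lambda t: t * t * t, 2.0) == 12.0  # d/dx x³ = 3x², which is 12 at x = 2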
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self) -> float:
        """Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        """Return walkable neighbours of `parent` on the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent,
                )
            )
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent pointers back from `node` to the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
__snake_case = GreedyBestFirst(init, goal)
__snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__snake_case = 2
for elem in grid:
print(elem)
| 117
| 0
|
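Note that search re-sorts the entire open list on every iteration just to pop the node with the smallest f_cost. A binary heap provides the same pop-the-minimum behaviour in O(log n) per operation; a small standalone illustration with heapq, where the (f_cost, label) tuples are purely illustrative:

import heapq

open_heap = []  # heapq keeps the smallest tuple at index 0
for f_cost, label in [(3, "far"), (1, "near"), (2, "mid")]:
    heapq.heappush(open_heap, (f_cost, label))

assert heapq.heappop(open_heap) == (1, "near")  # always the minimum f_cost
assert heapq.heappop(open_heap) == (2, "mid")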
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 51
|
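Python's built-in three-argument pow performs modular exponentiation natively, so the same witness test can be written without a helper module. A compact variant that is deterministic rather than probabilistic for n < 3,215,031,751, relying on the standard result that the fixed witness set {2, 3, 5, 7} suffices below that bound:

def is_probable_prime(n: int) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    for a in (2, 3, 5, 7):
        if a % n == 0:
            continue  # skip witnesses that are multiples of n itself
        b = pow(a, d, n)  # modular exponentiation via built-in pow
        if b in (1, n - 1):
            continue
        for _ in range(exp - 1):
            b = b * b % n
            if b == n - 1:
                break
        else:
            return False  # `a` witnesses that n is composite
    return True

assert [i for i in range(2, 30) if is_probable_prime(i)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]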
import string
def decrypt(message):
    """Brute-force a Caesar cipher by printing the decryption under every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
def main():
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 492
| 0
|
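The `if num < 0` branch above is modular arithmetic written out by hand; the % operator expresses the same wrap-around in one step, and decryption is simply encryption with the negated key. A small sketch:

import string

def caesar_shift(message, key):
    out = []
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % 26  # wrap-around for free
            out.append(string.ascii_uppercase[num])
        else:
            out.append(symbol)
    return ''.join(out)

assert caesar_shift('HELLO', 3) == 'KHOOR'
assert caesar_shift('KHOOR', -3) == 'HELLO'  # decrypting is shifting by -key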
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq
    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 533
|
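The tool above divides the work into three hooks that PipelineTool.__call__ chains together: encode preprocesses the raw input, forward runs the model, and decode postprocesses the result. A dependency-free sketch of that contract; the class below and its string-based stages are illustrative, not the transformers API:

class ThreeStageTool:
    def encode(self, raw):
        return raw.lower()  # stands in for preprocessing (e.g. an image processor)

    def forward(self, inputs):
        return inputs.split()  # stands in for the model call

    def decode(self, outputs):
        return " ".join(outputs)  # stands in for postprocessing / detokenization

    def __call__(self, raw):
        return self.decode(self.forward(self.encode(raw)))

assert ThreeStageTool()("Hello World") == "hello world"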
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])
            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 533
| 1
|
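The FaissIndex assertions above reduce to a handful of raw faiss calls. A minimal sketch, assuming the faiss-cpu package is installed; the lazy import inside the function mirrors how the tests import faiss:

import numpy as np

def faiss_demo():
    import faiss  # imported lazily, as in the tests above

    index = faiss.IndexFlatIP(5)            # flat index with inner-product metric
    index.add(np.eye(5, dtype=np.float32))  # five orthonormal vectors, ids 0..4
    query = np.zeros((1, 5), dtype=np.float32)
    query[0, 1] = 1.0                       # matches the vector stored at id 1
    scores, indices = index.search(query, 1)
    assert indices[0, 0] == 1 and scores[0, 0] > 0

faiss_demo()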