| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86-54.5k | int64 0-371 | stringlengths 87-49.2k | int64 0-349 | int64 0-1 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a closure that tokenizes the `text` column of a batch of examples."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into `tf.train.Example` byte records."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_length.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
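
# Hedged usage sketch (added for illustration; not part of the original script): how the
# shards written above could be read back with tf.data. The glob pattern and the
# 512-token fixed length are assumptions matching this script's defaults.
def load_tfrecord_shards(pattern="tf-tpu/train/*.tfrecord", max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    files = tf.io.gfile.glob(pattern)
    ds = tf.data.TFRecordDataset(files)
    # Parse each serialized tf.train.Example back into a dict of dense tensors.
    return ds.map(lambda record: tf.io.parse_single_example(record, feature_spec))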
[code_codestyle: 279]
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")

        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
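
# Hedged usage sketch (added for illustration; not part of the original test file):
# how the same pipeline could be invoked in user code, using the tiny checkpoint
# exercised above. The local video path is hypothetical.
def example_video_classification_usage():
    classifier = pipeline(
        "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
    )
    return classifier("archery.mp4", top_k=2)  # -> [{"score": float, "label": str}, ...]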
[style_context_codestyle: 279, label: 1]
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value=None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root=None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node, new_children) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            # Go as far right as possible; the rightmost node holds the maximum.
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None) -> Node | None:
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            # Go as far left as possible; the leftmost node holds the minimum.
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
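
# Hedged usage sketch (added for illustration; not part of the original module):
# exercising find_kth_smallest and postorder, which the demo above does not cover,
# on the same sample values.
def example_traversals() -> None:
    t = BinarySearchTree()
    t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
    # 3rd smallest value, found via an inorder traversal starting at the root
    print(t.find_kth_smallest(3, t.root))  # -> 4
    # postorder visits children before their parent, so the root (8) comes last
    print([node.value for node in postorder(t.root)])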
[code_codestyle: 368]
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
[style_context_codestyle: 164, label: 0]
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
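
# Hedged usage sketch (added for illustration; not part of the original test file):
# the same greedy generation outside the test harness. OpenAIGPTTokenizer is assumed
# to be importable from transformers alongside the model class used above.
def example_openai_gpt_generation():
    from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    inputs = tokenizer("the president is", return_tensors="pt")
    output_ids = model.generate(inputs["input_ids"], do_sample=False)  # greedy decoding
    return tokenizer.decode(output_ids[0].tolist())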
[code_codestyle: 261]
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Return True if the given tree is a valid binary search tree."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
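
# Hedged usage sketch (added for illustration; not part of the original module):
# a small valid BST and an invalid one, checked with is_binary_search_tree.
def example_is_binary_search_tree() -> None:
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    print(is_binary_search_tree(valid))  # True

    # 5.0 in the left subtree violates the BST property (it must be < 2.0)
    invalid = TreeNode(2.0, TreeNode(5.0), TreeNode(3.0))
    print(is_binary_search_tree(invalid))  # False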
[style_context_codestyle: 229, label: 0]
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
[code_codestyle: 87]
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
[style_context_codestyle: 87, label: 1]
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Set a (possibly nested) attribute of `hf_pointer` to `value`, checking shapes first."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # Note: the original line here read `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy; the membership test below is the intended check.
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
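
# Hedged usage sketch (added for illustration; not part of the conversion script):
# round-tripping audio through a converted checkpoint. The Hub id and AutoProcessor
# usage are assumptions based on the public `facebook/encodec_24khz` release.
def example_roundtrip(audio_array):
    from transformers import AutoProcessor, EncodecModel

    model = EncodecModel.from_pretrained("facebook/encodec_24khz")
    processor = AutoProcessor.from_pretrained("facebook/encodec_24khz")
    inputs = processor(raw_audio=audio_array, sampling_rate=24_000, return_tensors="pt")
    # Encode to discrete codebook indices, then decode back to a waveform.
    encoder_outputs = model.encode(inputs["input_values"], inputs["padding_mask"])
    audio_values = model.decode(
        encoder_outputs.audio_codes, encoder_outputs.audio_scales, inputs["padding_mask"]
    )[0]
    return audio_values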
[code_codestyle: 51]
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str) -> None:
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
[style_context_codestyle: 113, label: 0]
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config( swin_name: str ) -> SwinConfig:
    """simple docstring"""
    config = SwinConfig()
    name_split = swin_name.split('_' )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
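# A quick sketch of how get_swin_config() reads a timm checkpoint name; the
# example name is the argparse default used further down in this script:
#
#   name_split = 'swin_tiny_patch4_window7_224'.split('_')
#   # name_split == ['swin', 'tiny', 'patch4', 'window7', '224']
#   # model_size  == name_split[1]          -> 'tiny'
#   # img_size    == int(name_split[4])     -> 224
#   # window_size == int(name_split[3][-1]) -> 7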
def rename_key( name ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
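# A small illustrative trace of rename_key() on two hypothetical timm keys;
# any key that is not the classification head gets the 'swin.' prefix:
#
#   rename_key('patch_embed.proj.weight')
#   # -> 'swin.embeddings.patch_embeddings.projection.weight'
#   rename_key('head.weight')
#   # -> 'classifier.weight'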
def convert_state_dict( orig_state_dict , model ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
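# The qkv branch above assumes timm stores query/key/value fused as one
# (3*dim, dim) weight (and a (3*dim,) bias). A self-contained sketch of the
# same slicing with a toy dim; the values are illustrative, not from a real
# checkpoint:
import torch as _torch
_dim = 4
_fused = _torch.arange(3 * _dim * _dim , dtype=_torch.float32 ).reshape(3 * _dim , _dim )
_q , _k , _v = _fused[:_dim, :] , _fused[_dim : _dim * 2, :] , _fused[-_dim:, :]
assert _torch.equal(_torch.cat([_q, _k, _v] , dim=0 ) , _fused )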
def convert_swin_checkpoint( swin_name , pytorch_dump_folder_path ):
    """simple docstring"""
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1E-3 )
    print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 345
|
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
    def __init__( self : List[str] , config_file_or_dict : Tuple):
        """simple docstring"""
        if isinstance(config_file_or_dict , dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict , 'r' , encoding='utf-8') as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode('utf-8')
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload( self : List[Any]):
        """simple docstring"""
        self._stage = self.get_value('zero_optimization.stage' , -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['cpu', 'nvme'])
            offload_devices = set(
                [
                    self.get_value('zero_optimization.offload_optimizer.device'),
                    self.get_value('zero_optimization.offload_param.device'),
                ])
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node( self : Optional[int] , ds_key_long : str):
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self : Any , ds_key_long : str , default=None):
        """simple docstring"""
        config , ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key , default)
    def del_config_sub_tree( self : int , ds_key_long : str , must_exist=False):
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)
    def is_true( self : str , ds_key_long : str):
        """simple docstring"""
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)
    def is_false( self : List[str] , ds_key_long : str):
        """simple docstring"""
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)
    def is_zero2( self : Union[str, Any]):
        """simple docstring"""
        return self._stage == 2
    def is_zero3( self : Union[str, Any]):
        """simple docstring"""
        return self._stage == 3
    def is_offload( self : Dict):
        """simple docstring"""
        return self._offload
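# A minimal usage sketch for the config wrapper above; the dict is an
# illustrative ZeRO-3 fragment, not a complete DeepSpeed configuration:
#
#   ds_config = UpperCamelCase({'zero_optimization': {'stage': 3, 'offload_param': {'device': 'cpu'}}})
#   ds_config.get_value('zero_optimization.stage')  # -> 3
#   ds_config.is_zero3()                            # -> True
#   ds_config.is_offload()                          # -> True ('cpu' is a valid offload device)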
class UpperCamelCase :
"""simple docstring"""
    def __init__( self : str , engine):
        """simple docstring"""
        self.engine = engine
    def backward( self : List[str] , loss , **kwargs):
        """simple docstring"""
        self.engine.backward(loss , **kwargs)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
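# A hedged sketch of how the wrapper above is driven from a training loop
# (deepspeed_engine, dataloader and loss_fn are hypothetical placeholders):
#
#   wrapper = UpperCamelCase(deepspeed_engine)
#   for batch in dataloader:
#       loss = loss_fn(deepspeed_engine(**batch))
#       wrapper.backward(loss)  # engine.backward(...) followed by engine.step(), as listed above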
class UpperCamelCase ( AcceleratedOptimizer ):
"""simple docstring"""
    def __init__( self : Union[str, Any] , optimizer):
        """simple docstring"""
        super().__init__(optimizer , device_placement=False , scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer , 'overflow')
    def zero_grad( self : Union[str, Any] , set_to_none=None):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step( self : Tuple):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def step_was_skipped( self : Dict):
        """simple docstring"""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class UpperCamelCase ( AcceleratedScheduler ):
"""simple docstring"""
    def __init__( self : str , scheduler , optimizers):
        """simple docstring"""
        super().__init__(scheduler , optimizers)
    def step( self : int):
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
    def __init__( self : Union[str, Any] , params , lr=0.0_01 , weight_decay=0 , **kwargs):
        """simple docstring"""
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class UpperCamelCase :
    """simple docstring"""
    def __init__( self : int , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs):
        """simple docstring"""
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
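# A short sketch of the intent behind the two placeholder classes above (the
# first stands in for an optimizer, the second for a scheduler): when both are
# fully described in the DeepSpeed config file, callers construct these light
# dummies and hand them to Accelerate instead of real objects. `model` below
# is a hypothetical placeholder, and DummyOptim/DummyScheduler are the
# upstream Accelerate names for what this file defines as `UpperCamelCase`:
#
#   dummy_optim = DummyOptim(params=model.parameters(), lr=1e-3)
#   dummy_sched = DummyScheduler(dummy_optim, total_num_steps=1000, warmup_num_steps=100)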
| 345
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __A (unittest.TestCase , ToolTesterMixin):
'''simple docstring'''
    def setUp( self : List[Any] ) ->Optional[int]:
        """simple docstring"""
        self.tool = load_tool("""text-classification""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""" , remote=True )
    def test_exact_match_arg( self : Optional[int] ) ->Optional[int]:
        """simple docstring"""
        result = self.tool("""That\'s quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_arg_remote( self : Optional[int] ) ->List[str]:
        """simple docstring"""
        result = self.remote_tool("""That\'s quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_kwarg( self : Any ) ->Tuple:
        """simple docstring"""
        result = self.tool(text="""That\'s quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_kwarg_remote( self : Optional[int] ) ->Union[str, Any]:
        """simple docstring"""
        result = self.remote_tool(text="""That\'s quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
| 347
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args, **kwargs ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self, model, tokenizer, processor ):
        """simple docstring"""
        object_detector = pipeline(
            '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        examples = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
    def run_pipeline_test( self, object_detector, examples ):
        """simple docstring"""
        outputs = object_detector(examples[0], threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n, 0 )
        self.assertEqual(
            outputs, [
                {
                    '''score''': ANY(float ),
                    '''label''': ANY(str ),
                    '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                }
                for i in range(n )
            ], )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf( self ):
"""simple docstring"""
pass
@require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        object_detector = pipeline(
            '''zero-shot-object-detection''', model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        outputs = object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=0.6_4, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
], )
        outputs = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
], threshold=0.6_4, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
[
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
], )
@require_torch
@slow
    def test_large_model_pt( self ):
        """simple docstring"""
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
], )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
], )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf( self ):
"""simple docstring"""
pass
@require_torch
@slow
    def test_threshold( self ):
        """simple docstring"""
        threshold = 0.2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], threshold=threshold, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
], )
@require_torch
@slow
    def test_top_k( self ):
        """simple docstring"""
        top_k = 2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''', candidate_labels=['''cat''', '''remote''', '''couch'''], top_k=top_k, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
], )
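# A minimal standalone sketch of the pipeline exercised by the tests above.
# The image URL and candidate labels come from the test cases; the output is a
# list of {'score', 'label', 'box'} dicts:
#
#   from transformers import pipeline
#   detector = pipeline('zero-shot-object-detection')
#   preds = detector(
#       'http://images.cocodataset.org/val2017/000000039769.jpg',
#       candidate_labels=['cat', 'remote', 'couch'],
#       threshold=0.2,
#   )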
| 75
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find('patch' )
    patch_size = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 7_68
        text_config.intermediate_size = 30_72
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 10_24
        vision_config.intermediate_size = 40_96
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 7_68
        vision_config.mit_intermediate_size = 30_72
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 3_36
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 7_68
    return config
def rename_key( name ):
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCAmelCase__ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCAmelCase__ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase__ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split('.' )
            if key.startswith('visual' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[
:dim
]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
elif key.startswith('mit' ):
            layer_num = key_split[2]
            dim = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
            layer_num = key_split[2]
            dim = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video( num_frames ):
    if num_frames == 8:
        filename = 'eating_spaghetti_8_frames.npy'
    elif num_frames == 16:
        filename = 'eating_spaghetti.npy'
    elif num_frames == 32:
        filename = 'eating_spaghetti_32_frames.npy'
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename=filename , repo_type='dataset' , )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
UpperCAmelCase__ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
    model.eval()
    if "drive" in checkpoint_url:
        output = 'pytorch_model.bin'
        gdown.cached_download(checkpoint_url , output , quiet=False )
        state_dict = torch.load(output , map_location='cpu' )['model']
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )['model']
    state_dict = convert_state_dict(state_dict , config )
    model = XCLIPModel(config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    image_size = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
    image_processor = VideoMAEImageProcessor(size=image_size )
    slow_tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=video , return_tensors='pt' , padding=True )
    print('Shape of pixel values:' , inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print('Probs:' , probs )
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
    else:
        raise ValueError(f"Model name {model_name} not supported" )
    assert torch.allclose(probs , expected_probs , atol=1e-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model, processor and slow tokenizer files to the hub...' )
        model.push_to_hub(model_name , organization='nielsr' )
        processor.push_to_hub(model_name , organization='nielsr' )
        slow_tokenizer.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 335
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCAmelCase_ : Any = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_model_names = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"""funnel-transformer/{name}""": 5_12 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class __SCREAMING_SNAKE_CASE (PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
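# A tiny worked example of create_token_type_ids_from_sequences above, using
# this class's cls_token_type_id of 2 and hypothetical 2- and 3-token inputs:
#
#   create_token_type_ids_from_sequences([10, 11], [20, 21, 22])
#   # -> [2] + [0, 0, 0] + [1, 1, 1, 1]
#   #    cls   ids_a+sep   ids_b+sep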
| 63
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
"""simple docstring"""
    model_type = 'big_bird'
    def __init__( self , vocab_size=5_03_58 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40_96 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=66 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=64 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class __SCREAMING_SNAKE_CASE (OnnxConfig ):
"""simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 63
| 1
|
from math import ceil, sqrt
def solution( limit: int = 1_0_0_0_0_0_0 ):
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F"""{solution() = }""")
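# The loop above counts square laminae: an outer square of side `outer_width`
# with a centred square hole of side `hole_width` (same parity) uses
# outer_width**2 - hole_width**2 tiles. A quick worked check with small,
# illustrative values:
#
#   outer_width, hole_width = 7, 3
#   tiles = outer_width**2 - hole_width**2   # 49 - 9 = 40 tiles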
| 361
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
blip_test = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class UpperCamelCase__ ( unittest.TestCase ):
    def test_get_test_to_tester_mapping(self ):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test )
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test )
        EXPECTED_BERT_MAPPING = {'''BertModelTest''': '''BertModelTester'''}
        EXPECTED_BLIP_MAPPING = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , EXPECTED_BLIP_MAPPING )
    def test_get_model_to_test_mapping(self ):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test )
        blip_model_test_mapping = get_model_to_test_mapping(blip_test )
        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , EXPECTED_BLIP_MAPPING )
    def test_get_model_to_tester_mapping(self ):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test )
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test )
        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , EXPECTED_BLIP_MAPPING )
| 90
| 0
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 271
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param (torch_layer , weight , bias=None ):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh (weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local (weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch (weights , torch_block , hidden_size ):
    """simple docstring"""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch (weights , torch_model , hidden_size ):
    """simple docstring"""
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch (trax_model_pkl_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , 'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 271
| 1
|
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def carrier_concentration( conductivity: float, electron_conc: float, mobility: float, ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
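# A usage sketch for carrier_concentration() above: with sigma = n * q * mu,
# passing mobility=0 asks the function to solve for mobility. The numbers are
# illustrative, not physical measurements:
#
#   name, value = carrier_concentration(conductivity=25.0, electron_conc=100.0, mobility=0)
#   # name == 'mobility', value == 25.0 / (100.0 * ELECTRON_CHARGE)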
| 101
|
'''simple docstring'''
def dodecahedron_surface_area( edge: float ) -> float:
    if edge <= 0 or not isinstance(edge, (int, float) ):
        raise ValueError("""Length must be a positive.""" )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge: float ) -> float:
    if edge <= 0 or not isinstance(edge, (int, float) ):
        raise ValueError("""Length must be a positive.""" )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
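# A quick usage sketch for the two functions above (an edge length of 5 is an
# arbitrary illustrative value):
#
#   area = dodecahedron_surface_area(5)   # 3 * sqrt(25 + 10*sqrt(5)) * 25
#   volume = dodecahedron_volume(5)       # ((15 + 7*sqrt(5)) / 4) * 125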
| 101
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory (args ):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class A__ ( BaseTransformersCLICommand):
@staticmethod
    def register_subcommand( parser ):
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model , cache , force , trust_remote_code ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
def __lowerCamelCase ( self ):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
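# Usage sketch (assuming this command is wired into the main transformers CLI parser):
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased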
| 86
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn='gelu-approximate',
            num_embeds_ada_norm=1000,
            norm_type='ada_norm_zero',
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1E-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256')
        pipe.to('cuda')

        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type='np').images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1E-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('cuda')

        words = ['vase', 'umbrella']
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type='np').images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1E-1
| 86
| 1
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
UpperCAmelCase_ : List[str] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase_ : Optional[int] = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
UpperCAmelCase_ : Optional[Any] = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
UpperCAmelCase_ : Tuple = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
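# For illustration: _re_tf_models.match("TFBertModel").groups()[0] == "Bert",
# while a plain "BertModel" is only matched by the PT regex above.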
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
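# For example, camel_case_split("TFBertModel") yields ["TF", "Bert", "Model"].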
def get_frameworks_table():
    """
    Generates a dataframe containing the supported auto classes for each model type, using the content of the auto
    modules.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
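# The resulting frame has one row per model type, with the columns
# model_type, pytorch, tensorflow, flax and processor.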
def update_pipeline_and_auto_class_table(table):
    """Update the table of model class to (pipeline_tag, auto_class) without removing old keys if they don't exist anymore."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        })
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata", folder_path=tmp_dir, repo_type="dataset", token=token, commit_message=commit_message, )
def check_pipeline_tags():
    """Check that all pipeline tasks appear in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"""`utils/update_metadata.py`: {msg}. Please add them!""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 358
|
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Interleave the characters of two strings; leftover characters of the
    longer string are appended at the end.

    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 198
| 0
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
| 216
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend='nccl', rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop('num_beams', model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, 'prefix', '') or ''
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch['input_ids'].to(model.device), attention_mask=batch['attention_mask'].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['ids']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({'pred': pred, 'id': ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
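# This function is meant to run under torch.distributed; an illustrative launch
# (script name and paths are hypothetical) would be:
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --save_dir /tmp/gen --data_dir /tmp/cnn_dm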
def run_generate():
    parser = argparse.ArgumentParser(
        epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate')
    parser.add_argument('--data_dir', type=str, help='like cnn_dm/test.source')
    parser.add_argument(
        '--model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.', default='sshleifer/distilbart-xsum-12-3', )
    parser.add_argument('--save_dir', type=str, help='where to save', default='tmp_gen')
    parser.add_argument('--max_source_length', type=int, default=None)
    parser.add_argument(
        '--type_path', type=str, default='test', help='which subset to evaluate typically train/val/test')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument(
        '--local_rank', type=int, default=-1, required=False, help='should be passed by distributed.launch')
    parser.add_argument(
        '--n_obs', type=int, default=None, required=False, help='How many observations. Defaults to all.')
    parser.add_argument(
        '--num_return_sequences', type=int, default=1, required=False, help='How many sequences to return')
    parser.add_argument(
        '--sync_timeout', type=int, default=600, required=False, help='How long should master process wait for other processes to finish.', )
    parser.add_argument('--src_lang', type=str, default=None, required=False)
    parser.add_argument('--tgt_lang', type=str, default=None, required=False)
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the beginning of src examples')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--debug', action='store_true')
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + '_tmp')
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob('rank_*.json'))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['src_lang'] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['tgt_lang'] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('pseudolabel_results.json')
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + '.target')
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = 'translation' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = 'bleu' if calc_bleu else 'rouge'
        metrics = score_fn(preds, labels)
        metrics['n_obs'] = len(preds)
        runtime = time.time() - start_time
        metrics['seconds_per_sample'] = round(runtime / metrics['n_obs'], 4)
        metrics['n_gpus'] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results):
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('waiting for all nodes to finish')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json'))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes')
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 216
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_fnet'''] = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_fnet_fast'''] = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_fnet'''] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 118
|
def partition(m: int) -> int:
    # memo[n][k] follows the standard partition recurrence p(n, k) = p(n, k-1) + p(n-k, k),
    # stored with a shifted index; the answer is memo[m][m - 1].
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
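# Sanity check: partition(5) == 7, matching the seven integer partitions of 5
# (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).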
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 118
| 1
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 133
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        # Rebuild the backend normalizer if the saved state disagrees with the arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 164
| 0
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline='') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size']))
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length']))
                if can_convert_to_int(row['result']):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size']), int(row['sequence_length']))
                    ] = int(row['result'])
                elif can_convert_to_float(row['result']):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size']), int(row['sequence_length']))
                    ] = float(row['result'])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log')
            ax.set_yscale('log')

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz']))
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len']))
            results = self.result_dict[model_name]['result']

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )

                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""")
                plt.plot(x_axis_array, y_axis_array, '--')

            title_str += f""" {label_model_name} vs."""

        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
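    # Usage sketch (file names are illustrative):
    #   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file memory_plot.png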
| 188
|
"""simple docstring"""
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ", )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
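# Worked example: infix_2_postfix("a+b*c") returns "abc*+", and
# infix_2_prefix("a+b*c") returns "+a*bc" (reverse, swap parentheses, convert to postfix, reverse).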
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 188
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
| 87
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def __UpperCamelCase ( self : Tuple ) -> int:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def __UpperCamelCase ( self : int ) -> List[Any]:
lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase_ , lowercase_ )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Tuple:
lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
self.assertIn("input_ids" , lowercase_ )
self.assertIn("attention_mask" , lowercase_ )
self.assertNotIn("labels" , lowercase_ )
self.assertNotIn("decoder_attention_mask" , lowercase_ )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
lowercase__ : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : int = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Any:
lowercase__ : Union[str, Any] = ["A long paragraph for summarization."]
lowercase__ : List[Any] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" )
lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" )
lowercase__ : Optional[int] = inputs["input_ids"]
lowercase__ : str = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : int = ["Summary of the text.", "Another summary."]
lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ )
lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
lowercase__ : Any = tokenizer.pad(lowercase_ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
pass
def __UpperCamelCase ( self : int ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[Any] = "A, <mask> AllenNLP sentence."
lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 87
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = data
def __iter__( self ):
"""simple docstring"""
for element in self.data:
yield element
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any]=True ) -> List[Any]:
'''simple docstring'''
lowercase = Accelerator(even_batches=lowerCAmelCase__ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def UpperCAmelCase__ ( lowerCAmelCase__ :Accelerator , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :bool = False ) -> Dict:
'''simple docstring'''
if iterable:
lowercase = DummyIterableDataset(torch.as_tensor(range(lowerCAmelCase__ ) ) )
else:
lowercase = TensorDataset(torch.as_tensor(range(lowerCAmelCase__ ) ) )
lowercase = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowercase = accelerator.prepare(lowerCAmelCase__ )
return dl
def UpperCAmelCase__ ( lowerCAmelCase__ :Accelerator , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :List[int] , ) -> Dict:
'''simple docstring'''
lowercase = create_dataloader(accelerator=lowerCAmelCase__ , dataset_size=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowercase = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def UpperCAmelCase__ ( ) -> Any:
'''simple docstring'''
lowercase = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def UpperCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
lowercase = create_accelerator(even_batches=lowerCAmelCase__ )
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
lowercase = create_accelerator(even_batches=lowerCAmelCase__ )
lowercase = torch.nn.Linear(1 , 1 )
lowercase = accelerator.prepare(lowerCAmelCase__ )
lowercase = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
lowercase = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(lowerCAmelCase__ ):
lowercase = ddp_model(batch[0].float() )
lowercase = output.sum()
loss.backward()
batch_idxs.append(lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Tuple:
'''simple docstring'''
with warnings.catch_warnings(record=lowerCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , lowerCAmelCase__ )
assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
lowercase = True
lowercase = False
lowercase = create_accelerator(even_batches=lowerCAmelCase__ )
lowercase = torch.nn.Linear(1 , 1 )
lowercase = accelerator.prepare(lowerCAmelCase__ )
lowercase = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
lowercase = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
lowercase = train_dl.batch_sampler.even_batches
lowercase = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
lowercase = True
lowercase = False
lowercase = create_accelerator(even_batches=lowerCAmelCase__ )
lowercase = torch.nn.Linear(1 , 1 )
lowercase = accelerator.prepare(lowerCAmelCase__ )
create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=lowerCAmelCase__ )
lowercase = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
lowercase = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
lowercase = create_accelerator()
lowercase = torch.nn.Linear(1 , 1 )
lowercase = accelerator.prepare(lowerCAmelCase__ )
create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=lowerCAmelCase__ )
with warnings.catch_warnings(record=lowerCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
pass
assert issubclass(w[-1].category , lowerCAmelCase__ )
assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
lowercase = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
lowercase = accelerator.state.distributed_type
lowercase = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(lowerCAmelCase__ )
lowercase = original_state
if __name__ == "__main__":
main()
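Condensed from the tests above, a hedged sketch of the pattern they exercise: with even_batches=False some ranks receive fewer batches, and Accelerator.join_uneven_inputs keeps DDP processes from deadlocking on the missing ones. It uses only the public Accelerate API and would be launched with something like `accelerate launch --num_processes 2 script.py`.

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(even_batches=False)
model = accelerator.prepare(torch.nn.Linear(1, 1))
# Three samples over two processes: rank 0 sees two batches, rank 1 sees one.
dl = accelerator.prepare(DataLoader(TensorDataset(torch.arange(3.0).unsqueeze(1)), batch_size=1))

with accelerator.join_uneven_inputs([model]):
    for (batch,) in dl:
        loss = model(batch).sum()
        accelerator.backward(loss)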
| 32
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 32
| 1
|
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
lowerCamelCase__ = numpy.vectorize(lambda x: x % 36)
lowerCamelCase__ = numpy.vectorize(round)
def __init__( self , lowercase ):
_lowerCamelCase : Optional[int] = self.modulus(lowercase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_lowerCamelCase : Dict = encrypt_key.shape[0]
def A_ ( self , lowercase ):
return self.key_string.index(lowercase )
def A_ ( self , lowercase ):
return self.key_string[round(lowercase )]
def A_ ( self ):
_lowerCamelCase : Optional[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_lowerCamelCase : Dict = det % len(self.key_string )
_lowerCamelCase : List[Any] = len(self.key_string )
if greatest_common_divisor(lowercase , len(self.key_string ) ) != 1:
_lowerCamelCase : List[Any] = (
F'''determinant modular {req_l} of encryption key({det}) '''
F'''is not co prime w.r.t {req_l}.\nTry another key.'''
)
raise ValueError(lowercase )
def A_ ( self , lowercase ):
_lowerCamelCase : int = [char for char in text.upper() if char in self.key_string]
_lowerCamelCase : Optional[int] = chars[-1]
while len(lowercase ) % self.break_key != 0:
chars.append(lowercase )
return "".join(lowercase )
def A_ ( self , lowercase ):
_lowerCamelCase : List[Any] = self.process_text(text.upper() )
_lowerCamelCase : Dict = ''
for i in range(0 , len(lowercase ) - self.break_key + 1 , self.break_key ):
_lowerCamelCase : Dict = text[i : i + self.break_key]
_lowerCamelCase : Any = [self.replace_letters(lowercase ) for char in batch]
_lowerCamelCase : Optional[Any] = numpy.array([vec] ).T
_lowerCamelCase : int = self.modulus(self.encrypt_key.dot(lowercase ) ).T.tolist()[
0
]
_lowerCamelCase : Tuple = ''.join(
self.replace_digits(lowercase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def A_ ( self ):
_lowerCamelCase : Tuple = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_lowerCamelCase : Dict = det % len(self.key_string )
_lowerCamelCase : Tuple = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
_lowerCamelCase : Optional[int] = i
break
_lowerCamelCase : Any = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(lowercase ) )
def A_ ( self , lowercase ):
_lowerCamelCase : Any = self.make_decrypt_key()
_lowerCamelCase : List[Any] = self.process_text(text.upper() )
_lowerCamelCase : Any = ''
for i in range(0 , len(lowercase ) - self.break_key + 1 , self.break_key ):
_lowerCamelCase : Optional[Any] = text[i : i + self.break_key]
_lowerCamelCase : Optional[Any] = [self.replace_letters(lowercase ) for char in batch]
_lowerCamelCase : Union[str, Any] = numpy.array([vec] ).T
_lowerCamelCase : List[Any] = self.modulus(decrypt_key.dot(lowercase ) ).T.tolist()[0]
_lowerCamelCase : Tuple = ''.join(
self.replace_digits(lowercase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def main() -> None:
_lowerCamelCase : Optional[int] = int(input('Enter the order of the encryption key: ' ) )
_lowerCamelCase : str = []
print('Enter each row of the encryption key with space separated integers' )
for _ in range(lowercase__ ):
_lowerCamelCase : str = [int(lowercase__ ) for x in input().split()]
hill_matrix.append(lowercase__ )
_lowerCamelCase : Optional[int] = HillCipher(numpy.array(lowercase__ ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
_lowerCamelCase : str = input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
_lowerCamelCase : Dict = input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(lowercase__ ) )
elif option == "2":
_lowerCamelCase : Any = input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
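To make the modular arithmetic concrete: over the 36-symbol alphabet (A-Z then 0-9), encrypting a block p computes c = K.p mod 36, and a key is valid exactly when its determinant is coprime with 36. A standalone worked example with the 2x2 key [[2, 5], [1, 6]] (det = 7, gcd(7, 36) = 1), independent of the class above:

import string

import numpy as np

alphabet = string.ascii_uppercase + string.digits
key = np.array([[2, 5], [1, 6]])
assert np.gcd(round(np.linalg.det(key)) % 36, 36) == 1  # key invertible mod 36

plain = np.array([alphabet.index(c) for c in "HI"])  # [7, 8]
cipher = key.dot(plain) % 36                         # [54, 55] % 36 -> [18, 19]
print("".join(alphabet[i] for i in cipher))          # -> "ST"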
| 96
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = "pix2struct_text_model"
snake_case = ["past_key_values"]
snake_case = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _SCREAMING_SNAKE_CASE=5_0244 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , )->int:
'''simple docstring'''
A_ : Optional[int] = vocab_size
A_ : Any = hidden_size
A_ : Optional[Any] = d_kv
A_ : int = d_ff
A_ : int = num_layers
A_ : Dict = num_heads
A_ : Any = relative_attention_num_buckets
A_ : int = relative_attention_max_distance
A_ : Optional[Any] = dropout_rate
A_ : Optional[Any] = layer_norm_epsilon
A_ : List[Any] = initializer_factor
A_ : Optional[int] = use_cache
A_ : Optional[Any] = eos_token_id
A_ : List[Any] = decoder_start_token_id
# for backwards compatibility
A_ : int = dense_act_fn
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , tie_word_embeddings=_SCREAMING_SNAKE_CASE , is_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
A_ , A_ : Union[str, Any] = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A_ : Any = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = "pix2struct_vision_model"
def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=1e-10 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=128 , **_SCREAMING_SNAKE_CASE , )->Optional[int]:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ : Dict = hidden_size
A_ : Union[str, Any] = patch_embed_hidden_size
A_ : Optional[Any] = d_ff
A_ : Optional[Any] = dropout_rate
A_ : int = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[str] = initializer_range
A_ : List[str] = initializer_factor
A_ : Union[str, Any] = attention_dropout
A_ : Union[str, Any] = layer_norm_eps
A_ : Dict = dense_act_fn
A_ : Union[str, Any] = seq_len
A_ : Optional[Any] = relative_attention_num_buckets
A_ : Tuple = relative_attention_max_distance
A_ : List[Any] = d_kv
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
A_ , A_ : str = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A_ : Any = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = "pix2struct"
snake_case = True
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , )->Any:
'''simple docstring'''
super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text_config is None:
A_ : Dict = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
A_ : List[Any] = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
        A_ : int = Pix2StructTextConfig(**text_config)
        A_ : Dict = Pix2StructVisionConfig(**vision_config)
A_ : Optional[Any] = self.text_config.decoder_start_token_id
A_ : Tuple = self.text_config.pad_token_id
A_ : Union[str, Any] = self.text_config.eos_token_id
A_ : str = initializer_factor
A_ : Tuple = initializer_range
A_ : List[str] = self.initializer_range
A_ : int = self.initializer_range
A_ : List[Any] = is_vqa
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : List[Any] = copy.deepcopy(self.__dict__ )
A_ : Dict = self.text_config.to_dict()
A_ : int = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output
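A short sketch of the composition pattern this config file implements, using the public transformers classes (assumes a transformers release that ships Pix2Struct; the small layer and head counts below are arbitrary):

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)

# Compose the joint config, then round-trip it through a plain dict.
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
as_dict = config.to_dict()
assert as_dict["model_type"] == "pix2struct"
assert as_dict["text_config"]["num_layers"] == 2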
| 186
| 0
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
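A usage sketch for the converter, run separately from the CLI entry point above; the checkpoint written here is a toy stand-in for a real pytorch_model.bin, and the script file name in the comment is assumed:

# Equivalent CLI (file name assumed):
#   python convert_model_to_fp16.py pytorch_model.bin --save_path model_fp16.bin
import torch

torch.save({"weight": torch.ones(2, 2)}, "pytorch_model.bin")
convert("pytorch_model.bin", save_path="model_fp16.bin")
assert torch.load("model_fp16.bin")["weight"].dtype == torch.float16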
| 364
|
"""simple docstring"""
import base64


def ascii85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
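A quick round trip for the helpers above; the encoded value is the standard Ascii85 rendering of "Hello World!":

encoded = ascii85_encode("Hello World!")
assert encoded == b'87cURD]i,"Ebo80'
assert ascii85_decode(encoded) == "Hello World!"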
| 230
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE_ : Any = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335
|
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
| 335
| 1
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 234
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCamelCase = ["""text""", """image""", """audio"""]
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_snake_case , _snake_case ):
inputs.append(create_inputs(_snake_case ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
for output in outputs:
if isinstance(_snake_case , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(_snake_case , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(_snake_case , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class lowerCamelCase__ :
def _UpperCamelCase ( self ):
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
UpperCAmelCase = self.tool.inputs
for _input in inputs:
if isinstance(_input ,A ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCAmelCase = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _UpperCamelCase ( self ):
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = self.tool(*A )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCAmelCase = [outputs]
self.assertListEqual(output_types(A ) ,self.tool.outputs )
def _UpperCamelCase ( self ):
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def _UpperCamelCase ( self ):
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = self.tool(*A )
if not isinstance(A ,A ):
UpperCAmelCase = [outputs]
self.assertEqual(len(A ) ,len(self.tool.outputs ) )
for output, output_type in zip(A ,self.tool.outputs ):
UpperCAmelCase = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(A ,A ) )
def _UpperCamelCase ( self ):
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = []
for _input, input_type in zip(A ,self.tool.inputs ):
if isinstance(A ,A ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCAmelCase = self.tool(*A )
if not isinstance(A ,A ):
UpperCAmelCase = [outputs]
self.assertEqual(len(A ) ,len(self.tool.outputs ) )
| 234
| 1
|
import os
import pytest
from attr import dataclass
a__ = """us-east-1""" # defaults region
@dataclass
class snake_case :
'''simple docstring'''
snake_case_ : Optional[Any] = 42
snake_case_ : Dict = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
snake_case_ : str = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 5_00,
"""save_steps""": 55_00,
}
snake_case_ : Optional[int] = {**hyperparameters, """max_steps""": 10_00}
@property
def UpperCamelCase_ ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def UpperCamelCase_ ( self : str) -> str:
"""simple docstring"""
        return F'''{self.framework}-transformers-test'''
@property
def UpperCamelCase_ ( self : List[str]) -> str:
"""simple docstring"""
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def UpperCamelCase_ ( self : Optional[int]) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
_snake_case : Any = SageMakerTestEnvironment(framework=request.cls.framework )
| 317
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__A = random.Random()
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=1.0 , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
if rng is None:
__lowerCamelCase = global_rng
__lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=400 , lowerCamelCase__=2_000 , lowerCamelCase__=10 , lowerCamelCase__=160 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_000 , lowerCamelCase__=False , lowerCamelCase__=True , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = min_seq_length
__lowerCamelCase = max_seq_length
__lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCamelCase = padding_value
__lowerCamelCase = sampling_rate
__lowerCamelCase = return_attention_mask
__lowerCamelCase = do_normalize
__lowerCamelCase = feature_size
__lowerCamelCase = chunk_length
__lowerCamelCase = hop_length
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self , lowerCamelCase__=False , lowerCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
__lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = WhisperFeatureExtractor if is_speech_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = WhisperFeatureExtractionTester(self )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
__lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
__lowerCamelCase = feat_extract_first.to_dict()
__lowerCamelCase = feat_extract_second.to_dict()
__lowerCamelCase = feat_extract_first.mel_filters
__lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(lowerCamelCase__ , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCamelCase__ )
__lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
__lowerCamelCase = feat_extract_first.to_dict()
__lowerCamelCase = feat_extract_second.to_dict()
__lowerCamelCase = feat_extract_first.mel_filters
__lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
__lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCamelCase = np.asarray(lowerCamelCase__ )
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
__lowerCamelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
__lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
__lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
import torch
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase = np.random.rand(100 , 32 ).astype(np.floataa )
__lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCamelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowerCamelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__lowerCamelCase = ds.sort('id' ).select(range(lowerCamelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
# fmt: off
__lowerCamelCase = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
__lowerCamelCase = self._load_datasamples(1 )
__lowerCamelCase = WhisperFeatureExtractor()
__lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase__ , atol=1e-4 ) )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase = self._load_datasamples(1 )[0]
__lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
__lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
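The last test asserts the defining property of zero_mean_unit_var_norm: after rescaling, each example has mean close to 0 and variance close to 1. The operation itself, as a minimal numpy sketch independent of the transformers implementation:

import numpy as np


def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    # Standardize one example; eps guards against division by zero.
    return (x - x.mean()) / np.sqrt(x.var() + eps)


audio = np.random.rand(3_000) * 65_535  # rescaled to [0, 65535]
normed = zero_mean_unit_var(audio)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3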
| 90
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __lowercase ( _a ):
snake_case_ : Dict = np.shape(_a )
if rows != columns:
snake_case_ : Optional[int] = (
'''\'table\' has to be of square shaped array but got a '''
f"{rows}x{columns} array:\n{table}"
)
raise ValueError(_a )
snake_case_ : str = np.zeros((rows, columns) )
snake_case_ : Optional[Any] = np.zeros((rows, columns) )
for i in range(_a ):
for j in range(_a ):
snake_case_ : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(_a ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
snake_case_ : Any = (table[i][j] - total) / upper[j][j]
snake_case_ : Tuple = 1
for j in range(_a , _a ):
snake_case_ : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(_a ) )
snake_case_ : Optional[Any] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
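A standalone numerical check of the Doolittle factorization implemented above: L comes out unit lower-triangular, U upper-triangular, and their product reproduces the input (provided no leading principal minor is zero):

import numpy as np

table = np.array([[2.0, 5.0, 3.0], [3.0, 1.0, 2.0], [1.0, 2.0, 1.0]])
n = table.shape[0]
lower, upper = np.zeros((n, n)), np.zeros((n, n))
for i in range(n):
    for j in range(i, n):  # row i of U
        upper[i, j] = table[i, j] - lower[i, :i] @ upper[:i, j]
    for j in range(i, n):  # column i of L (puts 1.0 on the diagonal)
        lower[j, i] = (table[j, i] - lower[j, :i] @ upper[:i, i]) / upper[i, i]

assert np.allclose(lower @ upper, table)
assert np.allclose(np.diag(lower), 1.0)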
| 354
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ : Union[str, Any] = get_tests_dir('''fixtures''')
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Any ):
# A mock response for an HTTP head request to emulate server down
snake_case_ : Any = mock.Mock()
snake_case_ : Tuple = 500
snake_case_ : Dict = {}
snake_case_ : Optional[Any] = HTTPError
snake_case_ : Optional[int] = {}
# Download this model to make sure it's in the cache.
snake_case_ : List[Any] = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowercase_ ) as mock_head:
snake_case_ : Any = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self : Optional[int] ):
# This test is for deprecated behavior and can be removed in v5
snake_case_ : Optional[Any] = Wav2Vec2FeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase):
@classmethod
def _snake_case ( cls : List[Any] ):
snake_case_ : Dict = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _snake_case ( cls : int ):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def _snake_case ( self : Any ):
snake_case_ : str = Wav2Vec2FeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
snake_case_ : Optional[int] = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_ , repo_id='''test-feature-extractor''' , push_to_hub=lowercase_ , use_auth_token=self._token )
snake_case_ : Optional[int] = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def _snake_case ( self : List[Any] ):
snake_case_ : List[Any] = Wav2Vec2FeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
snake_case_ : Optional[int] = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=lowercase_ , use_auth_token=self._token )
snake_case_ : str = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def _snake_case ( self : List[Any] ):
CustomFeatureExtractor.register_for_auto_class()
snake_case_ : int = CustomFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
snake_case_ : List[str] = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 155
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(lowerCAmelCase__ )] )
lowercase = np.array(lowerCAmelCase__ )
lowercase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , lowerCAmelCase__ ) ) , x.transpose() ) , lowerCAmelCase__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = (1, 2, 1)
lowercase = (1, 1, 0, 7)
lowercase = SARIMAX(
lowerCAmelCase__ , exog=lowerCAmelCase__ , order=lowerCAmelCase__ , seasonal_order=lowerCAmelCase__ )
lowercase = model.fit(disp=lowerCAmelCase__ , maxiter=600 , method='''nm''' )
lowercase = model_fit.predict(1 , len(lowerCAmelCase__ ) , exog=[test_match] )
return result[0]
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = regressor.predict(lowerCAmelCase__ )
return y_pred[0]
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
return low_lim
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = 0
lowercase = 0
for i in list_vote:
if i > actual_result:
lowercase = not_safe + 1
else:
if abs(abs(lowerCAmelCase__ ) - abs(lowerCAmelCase__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowercase__ :int = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
lowercase__ :Any = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
lowercase__ :Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
lowercase__ :Union[str, Any] = normalize_df[:, 2].tolist()
lowercase__ :Tuple = normalize_df[:, 0].tolist()
lowercase__ :List[Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowercase__ :Optional[Any] = normalize_df[:, [1, 2]].tolist()
lowercase__ :str = x[: len(x) - 1]
lowercase__ :Dict = x[len(x) - 1 :]
# for linear regression & sarimax
lowercase__ :Tuple = total_date[: len(total_date) - 1]
lowercase__ :Any = total_user[: len(total_user) - 1]
lowercase__ :str = total_match[: len(total_match) - 1]
lowercase__ :Optional[int] = total_date[len(total_date) - 1 :]
lowercase__ :int = total_user[len(total_user) - 1 :]
lowercase__ :Union[str, Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowercase__ :int = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowercase__ :Dict = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 101
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Tuple =ShapEPipeline
lowercase_ : List[Any] =['''prompt''']
lowercase_ : int =['''prompt''']
lowercase_ : Union[str, Any] =[
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowercase_ : Optional[int] =False
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 8
@property
def A__ ( self):
lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
return tokenizer
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(A__)
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase = PriorTransformer(**A__)
return model
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase = ShapERenderer(**A__)
return model
def A__ ( self):
lowercase = self.dummy_prior
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_renderer
lowercase = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_0_2_4 ,prediction_type='''sample''' ,use_karras_sigmas=A__ ,clip_sample=A__ ,clip_sample_range=1.0 ,)
lowercase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def A__ ( self):
lowercase = '''cpu'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = pipe(**self.get_dummy_inputs(A__))
lowercase = output.images[0]
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
lowercase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def A__ ( self):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def A__ ( self):
lowercase = torch_device == '''cpu'''
lowercase = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=A__ ,relax_max_difference=A__ ,)
    def test_num_images_per_prompt( self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs ,num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    def tearDown( self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e( self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''')
        pipe = ShapEPipeline.from_pretrained('''openai/shap-e''')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            '''a shark''' ,generator=generator ,guidance_scale=15.0 ,num_inference_steps=6_4 ,frame_size=6_4 ,output_type='''np''' ,).images[0]
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(images ,expected_image)
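# --- Illustrative usage sketch (added; not part of the original test file) ---
# A minimal, hedged example of driving the pipeline the same way the slow test
# above does, and exporting the rendered views with diffusers' export_to_gif.
# The checkpoint name matches the test; the output filename is illustrative.
if __name__ == "__main__":
    import torch
    from diffusers import ShapEPipeline
    from diffusers.utils import export_to_gif

    pipe = ShapEPipeline.from_pretrained("openai/shap-e")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    generator = torch.Generator(device=pipe.device).manual_seed(0)
    # Each result is a stack of rendered views of the generated 3D asset.
    frames = pipe(
        "a shark",
        generator=generator,
        guidance_scale=15.0,
        num_inference_steps=64,
        frame_size=64,
        output_type="pil",
    ).images[0]
    export_to_gif(frames, "shark_3d.gif")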
| 101
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
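# --- Illustrative note (added; not part of the original file) ---
# With the lazy-module pattern above, symbols resolve on first attribute access
# instead of importing every backend at package-import time. A hedged sketch:
#
#     from transformers import XGLMConfig, XGLMTokenizer   # resolved lazily
#     config = XGLMConfig()
#     tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#
# The heavy torch/TF/Flax modeling modules are only imported when one of their
# classes is first touched, keeping `import transformers` fast.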
| 103
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( AbstractDatasetReader ):
    def __init__( self, df, split = None, features = None, streaming = True, cache_dir = None, keep_in_memory = False, working_dir = None, load_from_cache_file = True, file_format = "arrow", **kwargs, ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )
    def read( self ):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split )
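# --- Illustrative usage sketch (added; not part of the original file) ---
# A hedged example of materializing a Spark DataFrame as a Hugging Face
# Dataset; the public entry point `Dataset.from_spark` wraps this reader, and
# the local Spark session below is purely illustrative.
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    from datasets import Dataset

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
    ds = Dataset.from_spark(df)  # downloads/prepares into an Arrow-backed Dataset
    print(ds[0])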
| 103
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
    """simple docstring"""
    def __init__( self : Optional[int] , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self : List[str]):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : int):
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self : List[Any] , config , pixel_values , labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self : int , config , pixel_values , labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_backbone( self : List[str] , config , pixel_values , labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common( self : Union[str, Any]):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self : Union[str, Any]):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self : List[str]):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37)
    def test_config( self : str):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : Any):
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds( self : Optional[int]):
        pass
    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes( self : int):
        pass
    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking( self : str):
        pass
    def test_training( self : Optional[int]):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing( self : Dict):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature( self : Dict):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self : Dict):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output( self : Optional[Any]):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def test_for_image_classification( self : Optional[int]):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self : Optional[Any]):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( )-> List[str]:
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self : Union[str, Any]):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self : int):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
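# --- Illustrative usage sketch (added; not part of the original test file) ---
# A hedged example mirroring the integration test above: classify one image
# with the public `facebook/convnextv2-tiny-1k-224` checkpoint.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
    model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    # argmax over the 1000 ImageNet-1k classes
    print(model.config.id2label[logits.argmax(-1).item()])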
| 40
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__a: Tuple = None
__a: Tuple = logging.get_logger(__name__)
__a: Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__a: Optional[Any] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
__a: Tuple = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class TaTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def get_sentinel_tokens( self ) -> List[Any]:
        return list(
            set(filter(lambda token : bool(re.search(r'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
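# --- Illustrative usage sketch (added; not part of the original file) ---
# A hedged example of the behavior implemented above: the fast T5 tokenizer
# appends </s> via build_inputs_with_special_tokens and exposes the
# <extra_id_*> sentinel tokens used for span corruption.
if __name__ == "__main__":
    from transformers import T5TokenizerFast

    tok = T5TokenizerFast.from_pretrained("t5-small")
    enc = tok("translate English to German: Hello")
    print(enc.input_ids[-1] == tok.eos_token_id)   # True: </s> appended
    print(len(tok.get_sentinel_tokens()))          # 100 sentinels by default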
| 198
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    def get_dummy_components( self : Tuple ) -> Dict:
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs( self : int , device , seed=0 ) -> int:
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case( self : Optional[int] ) -> List[str]:
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False , expected_max_diff=3e-3 )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self : Union[str, Any] ) -> Any:
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=1e-2 )
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def test_inference_batch_consistent( self : Dict ) -> Optional[int]:
        '''simple docstring'''
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def test_inference_batch_single_identical( self : Optional[int] ) -> Tuple:
        '''simple docstring'''
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
    def test_num_images_per_prompt( self : List[str] ) -> Union[str, Any]:
        '''simple docstring'''
        pass
    def test_progress_bar( self : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests ( unittest.TestCase ):
'''simple docstring'''
    def test_full_model( self : List[str] ) -> int:
        '''simple docstring'''
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to("cuda" )
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type="pt" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def test_two_step_model( self : Dict ) -> Dict:
        '''simple docstring'''
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        pipe = pipe.to("cuda" )
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type="pt" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
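# --- Illustrative usage sketch (added; not part of the original test file) ---
# A hedged example mirroring the slow tests above; `export_to_video` from
# diffusers.utils writes the generated frames to an mp4 file.
if __name__ == "__main__":
    from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline
    from diffusers.utils import export_to_video

    pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")
    frames = pipe("Spiderman is surfing", num_inference_steps=25).frames
    export_to_video(frames, "spiderman.mp4")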
| 241
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch" )
    patch_size = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key( name ):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" )
    if name == "positional_embedding":
        name = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "ln_1" in name:
        name = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        name = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        name = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        name = name.replace("c_proj" , "fc2" )
    if name.startswith("transformer.resblocks" ):
        name = name.replace("transformer.resblocks" , "text_model.encoder.layers" )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj" , "self_attn.out_proj" )
    if "ln_final" in name:
        name = name.replace("ln_final" , "text_model.final_layer_norm" )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" )
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" )
    if name.startswith("visual.transformer.resblocks" ):
        name = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" )
    if "visual.conv1" in name:
        name = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" )
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre" , "vision_model.pre_layernorm" )
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post" , "vision_model.post_layernorm" )
    if "visual.proj" in name:
        name = name.replace("visual.proj" , "visual_projection.weight" )
    if "text_projection" in name:
        name = name.replace("text_projection" , "text_projection.weight" )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj" , "prompts_visual_projection" )
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln" , "prompts_visual_layernorm" )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional" , "position" )
    if name.startswith("mit.resblocks" ):
        name = name.replace("mit.resblocks" , "mit.encoder.layers" )
    # prompts generator
    if name.startswith("prompts_generator.norm" ):
        name = name.replace("prompts_generator.norm" , "prompts_generator.layernorm" )
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split("." )
            if key.startswith("visual" ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ = val[
:dim, :
]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[
-dim:, :
]
else:
UpperCAmelCase_ = val[
:dim
]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ = val[
:dim, :
]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[
-dim:, :
]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[-dim:]
        elif key.startswith("mit" ):
            layer_num = key_split[2]
            dim = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
        else:
            layer_num = key_split[2]
            dim = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video( num_frames ):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename=filename , repo_type="dataset" , )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
model.eval()
    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url , output , quiet=False )
        state_dict = torch.load(output , map_location="cpu" )["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )["model"]
    state_dict = convert_state_dict(state_dict , config )
    model = XCLIPModel(config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size )
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"] , videos=video , return_tensors="pt" , padding=True )
    print("Shape of pixel values:" , inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("Probs:" , probs )
# kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
    else:
        raise ValueError(f"""Model name {model_name} not supported""" )
    assert torch.allclose(probs , expected_probs , atol=1e-3 )
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub..." )
        model.push_to_hub(model_name , organization="nielsr" )
        processor.push_to_hub(model_name , organization="nielsr" )
        slow_tokenizer.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
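# --- Illustrative usage sketch (added; not part of the original script) ---
# After conversion, a checkpoint can do zero-shot video classification. A
# hedged example with the published `microsoft/xclip-base-patch32` weights,
# where `video` is a list of 8 frames shaped (H, W, 3), e.g. prepare_video(8):
#
#     from transformers import XCLIPProcessor, XCLIPModel
#     processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
#     model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
#     inputs = processor(text=["playing sports", "eating spaghetti"],
#                        videos=video, return_tensors="pt", padding=True)
#     probs = model(**inputs).logits_per_video.softmax(dim=1)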
| 241
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = """▁"""
UpperCAmelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase : Tuple = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
UpperCAmelCase : str = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
UpperCAmelCase : Tuple = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class __lowerCAmelCase ( PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , src_lang=None , tgt_lang=None , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> BatchEncoding:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en_XX" , tgt_texts = None , tgt_lang = "ro_RO" , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
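# --- Illustrative usage sketch (added; not part of the original file) ---
# A hedged example of the language-code machinery above: the tokenizer
# prepends the source language code and appends </s> to every sequence.
if __name__ == "__main__":
    from transformers import MBart50Tokenizer

    tok = MBart50Tokenizer.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    ids = tok("UN Chief Says There Is No Military Solution in Syria")["input_ids"]
    tokens = tok.convert_ids_to_tokens(ids)
    print(tokens[0], "...", tokens[-1])  # en_XX ... </s>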
| 95
|
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
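# --- Illustrative invocation (added; not part of the original script) ---
# A hedged command-line example; the script filename and all paths below are
# placeholders for a real TF checkpoint and its matching T5 config:
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/model.ckpt \
#         --config_file /path/to/t5/config.json \
#         --pytorch_dump_path /path/to/pytorch_output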
| 148
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
snake_case_ = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
snake_case_ = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self : int , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token(self ):
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token(self , value ):
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus(self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
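# Minimal usage sketch (assuming the package layout imported above):
#     tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#     encoding = tokenizer("Hello world!", return_tensors="np")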
| 238
|
from __future__ import annotations
# Precompute a prime sieve up to one million (Sieve of Eratosthenes).
sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    return sieve[n]
def contains_an_even_digit(number: int) -> bool:
    return any(digit in '''02468''' for digit in str(number) )
def find_circular_primes(limit: int = 100_0000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            # every rotation of the digits must itself be prime
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
def solution() -> int:
    return len(find_circular_primes() )
if __name__ == "__main__":
    print(F'{len(find_circular_primes()) = }')
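# Sanity check: there are 13 circular primes below 100 and 55 below one million.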
| 238
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
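# With the lazy module installed in sys.modules, importing GPTSw3Tokenizer from this
# subpackage only triggers the real import (and the sentencepiece check) on first access.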
| 276
|
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch :
    def __init__( self , text: str , pattern: str ) -> None:
        '''simple docstring'''
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char: str ) -> int:
        '''simple docstring'''
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos: int ) -> int:
        '''simple docstring'''
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ) -> list[int]:
        '''simple docstring'''
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
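# Expected output of the demo above: "Pattern found in following positions: " then [0, 3].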
| 276
| 1
|
'''simple docstring'''
def equation( x: float ) -> float:
    return 10 - x * x
def bisection( a: float , b: float ) -> float:
    # Bolzano's theorem: a sign change on [a, b] guarantees a root in between
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("""Wrong space!""" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
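# 10 - x * x has roots at +/- sqrt(10) ~ +/-3.1623; both demo calls above bracket the
# positive root, so each print converges to roughly 3.16 (the loop stops once b - a < 0.01).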
| 357
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left: int , right: int , array: list[int] , target: int ) -> int:
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array: list[int] , target: int ) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search( left: int , right: int , array: list[int] , target: int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
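# Example: ite_ternary_search([1, 3, 5, 7, 9], 7) == 3
# (ranges smaller than `precision` fall straight through to lin_search).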
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(F'''Iterative search: {target} found at position: {result1}''')
        print(F'''Recursive search: {target} found at position: {result2}''')
else:
print("""Not found""")
| 338
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
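# Standard transformers test layout: the *ModelTester below builds a tiny config plus
# random inputs, and the unittest classes further down drive them through each model class.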
class Mask2FormerModelTester:
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
        # the config attribute names below are a best-effort reconstruction for the
        # tiny test config; only the assigned values were preserved in the source
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def create_and_check_mask2former_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = Mask2FormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_mask2former_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = Mask2FormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
        comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class Mask2FormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': Mask2FormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = Mask2FormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=Mask2FormerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mask2former_model( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config , **inputs_dict , output_hidden_states=False )
    def test_mask2former_instance_segmentation_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs )
    @unittest.skip(reason='Mask2Former does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason='Mask2Former is not a generative model' )
    def test_generate_without_input_ids( self ):
        pass
    @unittest.skip(reason='Mask2Former does not use token embeddings' )
    def test_resize_tokens_embeddings( self ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small( self ):
        pass
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_model_with_labels( self ):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size) , device=torch_device ),
            'mask_labels': torch.randn((2, 10, *size) , device=torch_device ),
            'class_labels': torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config , **inputs_dict , output_hidden_states=True )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def model_checkpoints( self ):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor( self ):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def test_inference_no_head( self ):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
    def test_inference_universal_segmentation_head( self ):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_with_segmentation_maps_and_loss( self ):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
        pixel_values = inputs['pixel_values'].to(torch_device )
        mask_labels = [el.to(torch_device ) for el in inputs['mask_labels']]
        class_labels = [el.to(torch_device ) for el in inputs['class_labels']]
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        self.assertTrue(outputs.loss is not None )
| 32
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase ):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = '''ssube/stable-diffusion-x4-upscaler-onnx'''
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
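    # Note: the x4 upscaler turns the 128x128 dummy image into a 512x512 output,
    # which is what the shape assertions in the tests below check.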
    def test_pipeline_default_ddpm( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase ):
    @property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((128, 128) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 32
| 1
|
'''simple docstring'''
import math
import unittest
def is_prime(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 have the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
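# Why stepping by 6 works: any prime p > 3 satisfies p % 6 in {1, 5}, since 6k, 6k + 2
# and 6k + 4 are even and 6k + 3 is divisible by 3; so checking i and i + 2 suffices.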
class Test(unittest.TestCase ):
    """simple docstring"""
    def test_primes(self ):
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
    def test_not_primes(self ):
        with self.assertRaises(AssertionError ):
is_prime(-19)
self.assertFalse(
is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 227
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """simple docstring"""
    def __init__( self , rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
    @property
    def num_rows( self ):
        return len(self.rows)
    @property
    def num_columns( self ):
        return len(self.rows[0])
    @property
    def order( self ):
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant( self ):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))
    def is_invertable( self ):
        return bool(self.determinant())
    def get_minor( self , row: int , column: int):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor( self , row: int , column: int):
        if (row + column) % 2 == 0:
            return self.get_minor(row , column)
        return -1 * self.get_minor(row , column)
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row , column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors( self ):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse( self ):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)
def __repr__( self : str):
return str(self.rows)
def __str__( self : Optional[int]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
                    '[' + '. '.join([str(value) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
    def add_row( self , row: list[int] , position: int | None = None):
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row , list):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column: list[int] , position: int | None = None):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column , list):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__( self , other: object):
        if not isinstance(other , Matrix):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self , other: object):
        return not self == other
def __neg__( self : Optional[Any]):
return self * -1
    def __add__( self , other: Matrix):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])
    def __sub__( self , other: Matrix):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])
    def __mul__( self , other: Matrix | int | float):
        if isinstance(other , (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other , Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row , column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')
    def __pow__( self , other: int):
        if not isinstance(other , int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result
    @classmethod
    def dot_product( cls , row: list[int] , column: list[int]):
        return sum(row[i] * column[i] for i in range(len(row)))
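# Example: Matrix([[1, 2], [3, 4]]).determinant() == -2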
if __name__ == "__main__":
import doctest
doctest.testmod()
| 227
| 1
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list ) -> list:
    '''simple docstring'''
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
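# Example: dutch_national_flag_sort([2, 0, 1, 0, 2]) -> [0, 0, 1, 2, 2]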
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 68
|
def hamming(n_element: int ) -> list:
    '''simple docstring'''
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number" )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
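# Example: hamming(5) -> [1, 2, 3, 4, 5]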
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 68
| 1
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
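# These tests run tiny and full checkpoints through the fill-mask task and compare the
# `nested_simplify`-rounded outputs (sequence / score / token / token_str) to references.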
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf( self ):
        unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
        outputs = unmasker("My name is <mask>" )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38_015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25_506, "token_str": " accuser"},
            ] , )
        outputs = unmasker("The largest city in France is <mask>" )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38_015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25_506,
                    "token_str": " accuser",
                },
            ] , )
        outputs = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13_606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2_941, "token_str": " Te"},
            ] , )
@require_torch
    def test_small_model_pt( self ):
        unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
        outputs = unmasker("My name is <mask>" )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35_676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16_416, "token_str": "ELS"},
            ] , )
        outputs = unmasker("The largest city in France is <mask>" )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35_676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16_416, "token_str": "ELS"},
            ] , )
        outputs = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2_941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13_606, "token_str": " Clara"},
            ] , )
        outputs = unmasker("My name is <mask> <mask>" , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35_676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35_676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ] , )
@require_torch_gpu
    def test_fp16_casting( self ):
        pipe = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
        # convert model to fp16
        pipe.model.half()
        outputs = pipe("Paris is the [MASK] of France." )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs , list )
@slow
@require_torch
    def test_large_model_pt( self ):
        unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
        self.run_large_test(unmasker )
@slow
@require_tf
    def test_large_model_tf( self ):
        unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
        self.run_large_test(unmasker )
    def run_large_test( self , unmasker ):
        outputs = unmasker("My name is <mask>" )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.0_07, "token": 1_573, "token_str": " Chris"},
            ] , )
        outputs = unmasker("The largest city in France is <mask>" )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.2_51,
                    "token": 2_201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.2_14,
                    "token": 12_790,
                    "token_str": " Lyon",
                },
            ] , )
        outputs = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {"sequence": "My name is Patrick", "score": 0.0_05, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.0_00, "token": 13_606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.0_00, "token": 2_941, "token_str": " Te"},
            ] , )
@require_torch
    def test_model_no_pad_pt( self ):
        pipe = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
        pipe.tokenizer.pad_token_id = None
        pipe.tokenizer.pad_token = None
        self.run_pipeline_test(pipe , [] )
@require_tf
    def test_model_no_pad_tf( self ):
        pipe = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
        pipe.tokenizer.pad_token_id = None
        pipe.tokenizer.pad_token = None
        self.run_pipeline_test(pipe , [] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : Union[str, Any] = fill_masker.tokenizer
__magic_name__ : int = fill_masker.model
__magic_name__ : int = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
] , )
__magic_name__ : List[Any] = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
] , )
__magic_name__ : Tuple = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
],
[
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_a ):
fill_masker("This is" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : str = tokenizer.get_vocab()
__magic_name__ : Any = sorted(vocab.keys() )[:2]
# Pipeline argument
__magic_name__ : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
__magic_name__ : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
] , )
__magic_name__ : str = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _a )
__magic_name__ : int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_a ) )
# Call argument
__magic_name__ : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
__magic_name__ : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
] , )
__magic_name__ : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , _a )
__magic_name__ : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(_a ) )
# Score equivalence
__magic_name__ : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_a )
__magic_name__ : Optional[int] = [top_mask["token_str"] for top_mask in outputs]
__magic_name__ : Union[str, Any] = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
__magic_name__ : Optional[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_a )
__magic_name__ : Dict = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
__magic_name__ : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
__magic_name__ : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""] )
with self.assertRaises(_a ):
__magic_name__ : str = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="" )
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
__magic_name__ : int = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
] , )
__magic_name__ : List[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
__magic_name__ : List[str] = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : Tuple = tokenizer.get_vocab()
__magic_name__ : List[str] = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
__magic_name__ : int = sorted(vocab.keys() )[:3]
__magic_name__ : Any = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
        # If we use the most probable targets and filter differently, we should still
        # have the same results
        __magic_name__ : Optional[Any] = [el["token_str"] for el in sorted(_a , key=lambda x : x["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
__magic_name__ : Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
__magic_name__ : Optional[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
__magic_name__ : str = sorted(vocab.keys() )[:3]
__magic_name__ : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__magic_name__ : Tuple = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return
        # more unique results than there are unique targets
self.assertEqual(len(_a ) , 3 )
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : str = FillMaskPipeline(model=_a , tokenizer=_a )
__magic_name__ : str = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
],
[
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
],
[
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
{"sequence": ANY(_a ), "score": ANY(_a ), "token": ANY(_a ), "token_str": ANY(_a )},
],
] , )
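# Illustrative aside, not part of the test class above: a minimal sketch of the
# fill-mask behaviors these tests exercise, via the public `pipeline` factory.
# The checkpoint name is an assumption (any fill-mask model works), and running
# this downloads model weights.
from transformers import pipeline

fill_masker = pipeline("fill-mask", model="distilroberta-base")
mask = fill_masker.tokenizer.mask_token
# `top_k` caps the number of candidates returned per mask token:
print(fill_masker(f"Paris is the {mask} of France.", top_k=2))
# `targets` restricts scoring to the listed vocabulary entries:
print(fill_masker(f"Paris is the {mask} of France.", targets=["capital", "city"]))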
| 41
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _snake_case ( snake_case ):
UpperCamelCase__ = ['image_processor', 'tokenizer']
UpperCamelCase__ = 'BridgeTowerImageProcessor'
UpperCamelCase__ = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , _a , _a ):
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
__magic_name__ : Dict = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
__magic_name__ : List[str] = self.image_processor(
_a , return_tensors=_a , do_normalize=_a , do_center_crop=_a , **_a )
encoding.update(_a )
return encoding
def SCREAMING_SNAKE_CASE ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = self.tokenizer.model_input_names
__magic_name__ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
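# Hedged usage sketch (the Hub checkpoint name below is an assumption, and
# running this downloads weights): the processor above tokenizes the text,
# runs the image processor, and merges both outputs into one encoding.
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
inputs = processor(images=Image.new("RGB", (224, 224)), text="a photo", return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_mask, pixel_values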
| 41
| 1
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class UpperCAmelCase__ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
"""simple docstring"""
def __init__( self , A_=None , **A_ ) -> Union[str, Any]:
super().__init__(features=A_ )
__UpperCamelCase =torch_tensor_kwargs
        import torch  # noqa: F401 -- import torch at initialization so a missing install fails early
def _a ( self , A_ ) -> Union[str, Any]:
import torch
if isinstance(A_ , A_ ) and column:
if all(
isinstance(A_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(A_ )
return column
def _a ( self , A_ ) -> Any:
import torch
        if isinstance(A_ , (str, bytes, type(None)) ):
return value
elif isinstance(A_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCamelCase ={}
if isinstance(A_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCamelCase ={'dtype': torch.intaa}
elif isinstance(A_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCamelCase ={'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A_ , PIL.Image.Image ):
__UpperCamelCase =np.asarray(A_ )
return torch.tensor(A_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _a ( self , A_ ) -> Any:
import torch
# support for torch, tf, jax etc.
if hasattr(A_ , '__array__' ) and not isinstance(A_ , torch.Tensor ):
__UpperCamelCase =data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A_ , np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(A_ ) for substruct in data_struct] )
elif isinstance(A_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A_ ) for substruct in data_struct] )
return self._tensorize(A_ )
def _a ( self , A_ ) -> List[Any]:
return map_nested(self._recursive_tensorize , A_ , map_list=A_ )
def _a ( self , A_ ) -> Mapping:
__UpperCamelCase =self.numpy_arrow_extractor().extract_row(A_ )
__UpperCamelCase =self.python_features_decoder.decode_row(A_ )
return self.recursive_tensorize(A_ )
def _a ( self , A_ ) -> "torch.Tensor":
__UpperCamelCase =self.numpy_arrow_extractor().extract_column(A_ )
__UpperCamelCase =self.python_features_decoder.decode_column(A_ , pa_table.column_names[0] )
__UpperCamelCase =self.recursive_tensorize(A_ )
__UpperCamelCase =self._consolidate(A_ )
return column
def _a ( self , A_ ) -> Mapping:
__UpperCamelCase =self.numpy_arrow_extractor().extract_batch(A_ )
__UpperCamelCase =self.python_features_decoder.decode_batch(A_ )
__UpperCamelCase =self.recursive_tensorize(A_ )
for column_name in batch:
__UpperCamelCase =self._consolidate(batch[column_name] )
return batch
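# Illustrative aside: this formatter is what `datasets` dispatches to once a
# dataset's format is set to "torch", so numeric columns come back as tensors.
# A minimal sketch, assuming `datasets` and `torch` are installed:
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]}).with_format("torch")
print(type(ds[0]["x"]))  # <class 'torch.Tensor'>
print(ds["y"])           # tensor([0, 1]) after column consolidation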
| 62
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_A = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    _A = _LazyModule(__name__, globals()['__file__'], _A, module_spec=__spec__)
| 62
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "trocr"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : str = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self ,UpperCAmelCase_=5_02_65 ,UpperCAmelCase_=10_24 ,UpperCAmelCase_=12 ,UpperCAmelCase_=16 ,UpperCAmelCase_=40_96 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=True ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=0 ,UpperCAmelCase_=2 ,**UpperCAmelCase_ ,):
_lowercase : List[Any] = vocab_size
_lowercase : Optional[Any] = d_model
_lowercase : List[Any] = decoder_layers
_lowercase : Optional[int] = decoder_attention_heads
_lowercase : Dict = decoder_ffn_dim
_lowercase : Any = activation_function
_lowercase : Tuple = max_position_embeddings
_lowercase : int = dropout
_lowercase : Union[str, Any] = attention_dropout
_lowercase : Optional[Any] = activation_dropout
_lowercase : List[Any] = init_std
_lowercase : List[Any] = decoder_layerdrop
_lowercase : str = use_cache
_lowercase : Tuple = scale_embedding
_lowercase : Optional[int] = use_learned_position_embeddings
_lowercase : str = layernorm_embedding
super().__init__(
pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,decoder_start_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
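# Hedged usage sketch: instantiating the config with its defaults mirrors the
# decoder settings of microsoft/trocr-base-handwritten, and the attribute_map
# above lets common names resolve to decoder-specific ones.
from transformers import TrOCRConfig

cfg = TrOCRConfig()
print(cfg.hidden_size == cfg.d_model)  # True -- resolved through attribute_map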
| 361
|
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
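# Hedged sketch of the expected input format, inferred from the
# `line.split(";")` above (the file and test names below are made up): each
# line of --correct_filename is "file;class;test;correct_line", e.g.
#
#   tests/test_foo.py;FooModelTest;test_forward;self.assertEqual(out.shape, (1, 8))
#
# and when --fail_filename is given, only entries whose "file::class::test"
# triple appears in that file are rewritten.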
| 336
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : List[str] = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 118
| 1
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowercase__ : str = get_logger(__name__)
lowercase__ : List[str] = Path(__file__).parent / '''model_card_template.md'''
lowercase__ : Union[str, Any] = uuida().hex
lowercase__ : Tuple = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowercase__ : Optional[int] = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowercase__ : Optional[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __lowercase ( _a = None ):
snake_case_ : List[str] = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_flax_available():
ua += f"; jax/{_jax_version}"
ua += f"; flax/{_flax_version}"
if is_onnx_available():
ua += f"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_a , _a ):
ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() )
elif isinstance(_a , _a ):
ua += "; " + user_agent
return ua
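# Hypothetical output sketch for the helper above (version numbers are
# illustrative): with torch installed and telemetry enabled it returns e.g.
#   "diffusers/0.14.0; python/3.10.6; session_id/<hex>; torch/2.0.0"
# while HF_HUB_OFFLINE or DISABLE_TELEMETRY truncates it to
#   "diffusers/0.14.0; python/3.10.6; session_id/<hex>; telemetry/off"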
def __lowercase ( _a , _a = None , _a = None ):
if token is None:
snake_case_ : Union[str, Any] = HfFolder.get_token()
if organization is None:
snake_case_ : int = whoami(_a )['''name''']
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def __lowercase ( _a , _a ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(_a , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
snake_case_ : Union[str, Any] = args.hub_token if hasattr(_a , '''hub_token''' ) else None
snake_case_ : Dict = get_full_repo_name(_a , token=_a )
snake_case_ : List[str] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_a , model_name=_a , repo_name=_a , dataset_name=args.dataset_name if hasattr(_a , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_a , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(_a , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(_a , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_a , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(_a , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(_a , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_a , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_a , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(_a , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(_a , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
snake_case_ : Tuple = os.path.join(args.output_dir , '''README.md''' )
model_card.save(_a )
def __lowercase ( _a , _a = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
snake_case_ : Tuple = str(Path(_a ).as_posix() )
snake_case_ : int = re.search(r'''snapshots/([^/]+)/''' , _a )
if search is None:
return None
snake_case_ : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(_a ) else None
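# Worked example for the snapshot-path parsing above (the path is made up):
#   path = "models--org--repo/snapshots/0123456789abcdef0123456789abcdef01234567/model.bin"
#   re.search(r"snapshots/([^/]+)/", path).groups()[0]
#   # -> "0123456789abcdef0123456789abcdef01234567", a 40-char hex string that
#   #    REGEX_COMMIT_HASH accepts, so it is returned as the commit hash.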
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowercase__ : str = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
lowercase__ : List[Any] = os.path.join(hf_cache_home, '''diffusers''')
def __lowercase ( _a = None , _a = None ):
if new_cache_dir is None:
snake_case_ : Tuple = DIFFUSERS_CACHE
if old_cache_dir is None:
snake_case_ : List[str] = old_diffusers_cache
snake_case_ : Union[str, Any] = Path(_a ).expanduser()
snake_case_ : str = Path(_a ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
snake_case_ : List[Any] = new_cache_dir / old_blob_path.relative_to(_a )
new_blob_path.parent.mkdir(parents=_a , exist_ok=_a )
os.replace(_a , _a )
try:
os.symlink(_a , _a )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowercase__ : Optional[Any] = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
lowercase__ : Optional[int] = 0
else:
with open(cache_version_file) as f:
try:
lowercase__ : Optional[Any] = int(f.read())
except ValueError:
lowercase__ : Optional[Any] = 0
if cache_version < 1:
lowercase__ : Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
lowercase__ : Optional[Any] = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def __lowercase ( _a , _a = None ):
if variant is not None:
snake_case_ : str = weights_name.split('''.''' )
snake_case_ : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
snake_case_ : List[Any] = '''.'''.join(_a )
return weights_name
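# Quick illustration of the variant-renaming helper above: the variant is
# slotted in just before the file extension, e.g.
#   "diffusion_pytorch_model.bin" with variant "fp16"
#   -> "diffusion_pytorch_model.fp16.bin"
# and a `None` variant returns the name unchanged.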
def __lowercase ( _a , *,
_a , _a , _a , _a , _a , _a , _a , _a , _a , _a , _a=None , ):
snake_case_ : Dict = str(_a )
if os.path.isfile(_a ):
return pretrained_model_name_or_path
elif os.path.isdir(_a ):
if os.path.isfile(os.path.join(_a , _a ) ):
# Load from a PyTorch checkpoint
snake_case_ : Dict = os.path.join(_a , _a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_a , _a , _a ) ):
snake_case_ : List[Any] = os.path.join(_a , _a , _a )
return model_file
else:
raise EnvironmentError(
f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_a ).base_version ) >= version.parse('''0.20.0''' )
):
try:
snake_case_ : str = hf_hub_download(
_a , filename=_add_variant(_a , _a ) , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , local_files_only=_a , use_auth_token=_a , user_agent=_a , subfolder=_a , revision=revision or commit_hash , )
warnings.warn(
f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , _a , )
return model_file
except: # noqa: E722
warnings.warn(
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_a , _a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(_a , _a )}' so that the correct variant file can be added." , _a , )
try:
# 2. Load model file as usual
snake_case_ : Tuple = hf_hub_download(
_a , filename=_a , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , local_files_only=_a , use_auth_token=_a , user_agent=_a , subfolder=_a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 155
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _snake_case ( self : List[str] ):
snake_case_, snake_case_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
snake_case_ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
snake_case_ : Tuple = jax.device_count()
snake_case_ : Dict = num_samples * [prompt]
snake_case_ : Tuple = sd_pipe.prepare_inputs(lowercase_ )
snake_case_ : str = replicate(lowercase_ )
snake_case_ : Any = shard(lowercase_ )
snake_case_ : Optional[int] = jax.random.PRNGKey(0 )
snake_case_ : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
snake_case_ : Optional[Any] = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
snake_case_ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ : str = images[0, 253:256, 253:256, -1]
snake_case_ : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ : int = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def _snake_case ( self : str ):
snake_case_ : Optional[Any] = '''stabilityai/stable-diffusion-2'''
snake_case_, snake_case_ : Union[str, Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' )
snake_case_, snake_case_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision='''bf16''' , dtype=jnp.bfloataa , )
snake_case_ : List[Any] = scheduler_params
snake_case_ : int = '''A painting of a squirrel eating a burger'''
snake_case_ : str = jax.device_count()
snake_case_ : Union[str, Any] = num_samples * [prompt]
snake_case_ : int = sd_pipe.prepare_inputs(lowercase_ )
snake_case_ : List[str] = replicate(lowercase_ )
snake_case_ : List[Any] = shard(lowercase_ )
snake_case_ : int = jax.random.PRNGKey(0 )
snake_case_ : Tuple = jax.random.split(lowercase_ , jax.device_count() )
snake_case_ : int = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
snake_case_ : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ : List[str] = images[0, 253:256, 253:256, -1]
snake_case_ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ : Optional[int] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 155
| 1
|
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
while b:
a , a :Optional[Any] = b, a % b
return a
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(UpperCAmelCase_ , a % b )
def __lowerCamelCase ( ):
"""simple docstring"""
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
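# Worked trace of the iterative version above for euclidean_gcd(48, 18):
#   (a, b) = (48, 18) -> (18, 12) -> (12, 6) -> (6, 0), so the result is 6.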
| 94
|
import pprint
import requests
a__ = """https://zenquotes.io/api"""
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
a__ = random_quotes()
pprint.pprint(response)
| 317
| 0
|
"""simple docstring"""
__A : List[Any] = 256
# Modulus to hash a string
__A : int = 1000003
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
if p_len > t_len:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCAmelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
_UpperCAmelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = '''abc1abc12'''
_UpperCAmelCase = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
_UpperCAmelCase = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Test 2)
_UpperCAmelCase = '''ABABX'''
_UpperCAmelCase = '''ABABZABABYABABX'''
assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Test 3)
_UpperCAmelCase = '''AAAB'''
_UpperCAmelCase = '''ABAAAAAB'''
assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Test 4)
_UpperCAmelCase = '''abcdabcy'''
_UpperCAmelCase = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Test 5)
_UpperCAmelCase = '''Lü'''
_UpperCAmelCase = '''Lüsai'''
assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''Lue'''
assert not rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
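# Tiny numeric check of the rolling-hash update above, using alphabet_size = 256,
# a toy modulus of 101 (chosen only for readability), and a pattern length of 2:
#   hash("ab")      = (ord("a") * 256 + ord("b")) % 101 = (97 * 256 + 98) % 101 = 84
#   modulus_power   = 256 % 101 = 54
#   sliding to "bc": ((84 - 97 * 54) * 256 + ord("c")) % 101 = 38
#   direct check:    (98 * 256 + 99) % 101 = 38, so the incremental update agrees.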
| 359
|
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return "\n".join(
f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 326
| 0
|
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __A ( a_ :List[Any]) -> str: # picklable for multiprocessing
return x.sum()
def __A ( a_ :str) -> List[Any]: # picklable for multiprocessing
return i + 1
@dataclass
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : int = {}
__a : Any = []
__a : Tuple = 1
__a : Any = [1, 2]
__a : str = {'''a''': 1, '''b''': 2}
__a : Union[str, Any] = {'''a''': [1, 2], '''b''': [3, 4]}
__a : int = {'''a''': {'''1''': 1}, '''b''': 2}
__a : Optional[int] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
__a : List[str] = {}
__a : List[str] = []
__a : Optional[int] = 2
__a : Optional[Any] = [2, 3]
__a : int = {'''a''': 2, '''b''': 3}
__a : List[Any] = {'''a''': [2, 3], '''b''': [4, 5]}
__a : Tuple = {'''a''': {'''1''': 2}, '''b''': 3}
__a : List[str] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
__a : Optional[Any] = 2
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
__a : Optional[int] = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
__a : Optional[Any] = {'''a''': 2, '''b''': 0, '''c''': 2}
__a : Optional[int] = {
'''a''': np.eye(2 ).astype(_UpperCAmelCase ),
'''b''': np.zeros(3 ).astype(_UpperCAmelCase ),
'''c''': np.ones(2 ).astype(_UpperCAmelCase ),
}
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , map_numpy=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_UpperCAmelCase , _UpperCAmelCase , map_numpy=_UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_UpperCAmelCase , _UpperCAmelCase , map_numpy=_UpperCAmelCase , num_proc=_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_UpperCAmelCase , _UpperCAmelCase , map_numpy=_UpperCAmelCase , num_proc=_UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_UpperCAmelCase ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , _UpperCAmelCase , num_proc=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = {'''a''': 1, '''b''': 2}
__a : List[Any] = {'''a''': 3, '''b''': 4}
__a : Any = {'''a''': 5, '''b''': 6}
__a : List[str] = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = '''bar'''
__a : Union[str, Any] = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(_UpperCAmelCase , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __A ( a_ :str , a_ :Optional[Any] , a_ :List[Any]) -> str:
with patch('''datasets.utils.py_utils._single_map_nested''') as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''') as mock_multiprocessing_pool:
__a : Any = {F"""{i}""": i for i in range(a_)}
        __a : Tuple = map_nested(lambda x: x + 10 , a_ , num_proc=a_ , parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
@require_tf
def _lowerCamelCase ( self ):
import tensorflow as tf
from tensorflow.keras import layers
__a : Optional[int] = layers.Dense(2 )
def gen_random_output():
__a : Optional[Any] = tf.random.uniform((1, 3) )
return model(_UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=_UpperCAmelCase ):
__a : List[Any] = gen_random_output()
with temp_seed(42 , set_tensorflow=_UpperCAmelCase ):
__a : Union[str, Any] = gen_random_output()
__a : str = gen_random_output()
np.testing.assert_equal(_UpperCAmelCase , _UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def _lowerCamelCase ( self ):
import torch
def gen_random_output():
__a : int = torch.nn.Linear(3 , 2 )
__a : List[str] = torch.rand(1 , 3 )
return model(_UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=_UpperCAmelCase ):
__a : Optional[Any] = gen_random_output()
with temp_seed(42 , set_pytorch=_UpperCAmelCase ):
__a : Any = gen_random_output()
__a : str = gen_random_output()
np.testing.assert_equal(_UpperCAmelCase , _UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def _lowerCamelCase ( self ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
__a : str = gen_random_output()
with temp_seed(42 ):
__a : Union[str, Any] = gen_random_output()
__a : Union[str, Any] = gen_random_output()
np.testing.assert_equal(_UpperCAmelCase , _UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' , [{}])
def __A ( a_ :str) -> int:
__a : Optional[Any] = NestedDataStructure(a_).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def __A ( a_ :Optional[int] , a_ :List[str]) -> str:
__a : Union[str, Any] = NestedDataStructure(a_).flatten()
assert output == expected_output
def __A ( ) -> List[Any]:
__a : Union[str, Any] = A(x=1 , y='''foobar''')
__a : List[Any] = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(a_) == expected_output
__a : Any = {'''a''': {'''b''': A(x=10 , y='''foo''')}, '''c''': [A(x=20 , y='''bar''')]}
__a : List[Any] = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(a_) == expected_output
with pytest.raises(a_):
asdict([1, A(x=10 , y='''foo''')])
def __A ( a_ :str) -> Any:
return text.split()
def __A ( a_ :List[str]) -> Dict:
yield (time.time(), content)
time.sleep(2)
yield (time.time(), content)
def __A ( ) -> str:
with Pool(2) as pool:
__a : List[str] = list(iflatmap_unordered(a_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10))
assert out.count('''hello''') == 10
assert out.count('''there''') == 10
assert len(a_) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2) as pool:
__a : Any = list(iflatmap_unordered(a_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10))
assert out.count('''hello''') == 10
assert out.count('''there''') == 10
assert len(a_) == 20
# check that we get items as fast as possible
with Pool(2) as pool:
__a : Any = []
for yield_time, content in iflatmap_unordered(
a_ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}]):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(a_)
assert out.count('''a''') == 2
assert out.count('''b''') == 2
assert len(a_) == 4
| 160
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
A = NewType('''DataClass''', Any)
A = NewType('''DataClassType''', Any)
def __A ( a_ :List[str]) -> Tuple:
if isinstance(a_ , a_):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""")
def __A ( a_ :list) -> Callable[[str], Any]:
__a : Any = {str(a_): choice for choice in choices}
return lambda a_: str_to_choice.get(a_ , a_)
def __A ( *,
a_ :Union[str, List[str]] = None , a_ :str = None , a_ :Any = dataclasses.MISSING , a_ :Callable[[], Any] = dataclasses.MISSING , a_ :dict = None , **a_ :str , ) -> dataclasses.Field:
if metadata is None:
        # Important: don't use a dict as a default parameter in the function signature because dicts are mutable and shared across function calls
__a : List[Any] = {}
if aliases is not None:
__a : Optional[Any] = aliases
if help is not None:
__a : int = help
return dataclasses.field(metadata=a_ , default=a_ , default_factory=a_ , **a_)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 42
def __init__( self , _UpperCAmelCase , **_UpperCAmelCase ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
__a : str = ArgumentDefaultsHelpFormatter
super().__init__(**_UpperCAmelCase )
if dataclasses.is_dataclass(_UpperCAmelCase ):
__a : int = [dataclass_types]
__a : Optional[Any] = list(_UpperCAmelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_UpperCAmelCase )
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = f"""--{field.name}"""
__a : Optional[int] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _UpperCAmelCase ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
__a : Dict = kwargs.pop('''aliases''' , [] )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = [aliases]
__a : Tuple = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(_UpperCAmelCase , '''UnionType''' ) and isinstance(_UpperCAmelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_UpperCAmelCase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(_UpperCAmelCase ) not in field.type.__args__:
# filter `str` in Union
__a : List[str] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__a : List[str] = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__a : List[str] = (
field.type.__args__[0] if isinstance(_UpperCAmelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
__a : Optional[Any] = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__a : Optional[int] = {}
if origin_type is Literal or (isinstance(field.type , _UpperCAmelCase ) and issubclass(field.type , _UpperCAmelCase )):
if origin_type is Literal:
__a : int = field.type.__args__
else:
__a : List[str] = [x.value for x in field.type]
__a : Any = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
__a : Tuple = field.default
else:
__a : Optional[int] = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__a : Any = copy(_UpperCAmelCase )
# Hack because type=bool in argparse does not behave as we want.
__a : List[str] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # The default is False when a bool field declares no default of its own.
__a : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__a : List[Any] = default
# This tells argparse we accept 0 or 1 value after --field_name
__a : Union[str, Any] = '''?'''
# This is the value that will get picked if we do --field_name (without value)
__a : List[Any] = True
elif isclass(_UpperCAmelCase ) and issubclass(_UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = field.type.__args__[0]
__a : Optional[int] = '''+'''
if field.default_factory is not dataclasses.MISSING:
__a : Union[str, Any] = field.default_factory()
elif field.default is dataclasses.MISSING:
__a : List[Any] = True
else:
__a : int = field.type
if field.default is not dataclasses.MISSING:
__a : Optional[Any] = field.default
elif field.default_factory is not dataclasses.MISSING:
__a : Optional[int] = field.default_factory()
else:
__a : Union[str, Any] = True
parser.add_argument(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__a : Any = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
if hasattr(_UpperCAmelCase , '''_argument_group_name''' ):
__a : Any = self.add_argument_group(dtype._argument_group_name )
else:
__a : Optional[Any] = self
try:
__a : Dict[str, type] = get_type_hints(_UpperCAmelCase )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_UpperCAmelCase ):
__a : Union[str, Any] = '''.'''.join(map(_UpperCAmelCase , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(_UpperCAmelCase ):
if not field.init:
continue
__a : str = type_hints[field.name]
self._parse_dataclass_field(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__a : int = []
if args_filename:
args_files.append(Path(_UpperCAmelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__a : Optional[Any] = ArgumentParser()
args_file_parser.add_argument(_UpperCAmelCase , type=_UpperCAmelCase , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
__a , __a : List[Any] = args_file_parser.parse_known_args(args=_UpperCAmelCase )
__a : Union[str, Any] = vars(_UpperCAmelCase ).get(args_file_flag.lstrip('''-''' ) , _UpperCAmelCase )
if cmd_args_file_paths:
args_files.extend([Path(_UpperCAmelCase ) for p in cmd_args_file_paths] )
__a : Union[str, Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__a : Dict = file_args + args if args is not None else file_args + sys.argv[1:]
__a , __a : str = self.parse_known_args(args=_UpperCAmelCase )
__a : Optional[int] = []
for dtype in self.dataclass_types:
__a : Optional[int] = {f.name for f in dataclasses.fields(_UpperCAmelCase ) if f.init}
__a : List[str] = {k: v for k, v in vars(_UpperCAmelCase ).items() if k in keys}
for k in keys:
delattr(_UpperCAmelCase , _UpperCAmelCase )
__a : int = dtype(**_UpperCAmelCase )
outputs.append(_UpperCAmelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_UpperCAmelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
__a : Tuple = set(args.keys() )
__a : List[str] = []
for dtype in self.dataclass_types:
__a : Dict = {f.name for f in dataclasses.fields(_UpperCAmelCase ) if f.init}
__a : Union[str, Any] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__a : Tuple = dtype(**_UpperCAmelCase )
outputs.append(_UpperCAmelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(_UpperCAmelCase )}""" )
return tuple(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
with open(Path(_UpperCAmelCase ) , encoding='''utf-8''' ) as open_json_file:
__a : int = json.loads(open_json_file.read() )
__a : str = self.parse_dict(_UpperCAmelCase , allow_extra_keys=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
__a : Tuple = self.parse_dict(yaml.safe_load(Path(_UpperCAmelCase ).read_text() ) , allow_extra_keys=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
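# Minimal end-to-end sketch of the parser defined above (the field names here
# are illustrative; in the upstream transformers API the entry point is named
# `parse_args_into_dataclasses`):
import dataclasses

from transformers import HfArgumentParser

@dataclasses.dataclass
class TrainArgs:
    learning_rate: float = 3e-4
    fp16: bool = False

hf_parser = HfArgumentParser(TrainArgs)
(train_args,) = hf_parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--fp16"])
print(train_args)  # TrainArgs(learning_rate=0.0001, fp16=True)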
| 160
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int]=1024 , SCREAMING_SNAKE_CASE : List[str]=1024 , SCREAMING_SNAKE_CASE : List[Any]=False , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(a__ )
UpperCamelCase__ : Optional[int] = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''train''' , **a__ )
UpperCamelCase__ : List[Any] = tok.pad_token_id
def get_lens(SCREAMING_SNAKE_CASE : Any ):
UpperCamelCase__ : Optional[Any] = tqdm(
DataLoader(a__ , batch_size=512 , num_workers=8 , shuffle=a__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCamelCase__ : Tuple = []
for batch in dl:
UpperCamelCase__ : Dict = batch['''input_ids'''].ne(a__ ).sum(1 ).tolist()
UpperCamelCase__ : Dict = batch['''labels'''].ne(a__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(a__ , a__ ):
max_lens.append(max(a__ , a__ ) )
else:
max_lens.extend(a__ )
return max_lens
UpperCamelCase__ : List[Any] = get_lens(a__ )
UpperCamelCase__ : int = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''val''' , **a__ )
UpperCamelCase__ : str = get_lens(a__ )
pickle_save(a__ , train_ds.len_file )
pickle_save(a__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
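# Hypothetical invocation via fire (the tokenizer name and data directory are
# illustrative placeholders, not values from this repository):
#   python save_len_file.py facebook/bart-large ./wmt_en_ro --max_source_length 1024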
| 370
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments :
    csv_file: str = field(
        metadata={"help": "The csv file to plot."} , )
    plot_along_batch: bool = field(
        default=False , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    is_time: bool = field(
        default=False , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    no_log_scale: bool = field(
        default=False , metadata={"help": "Disable logarithmic scale when plotting"} , )
    is_train: bool = field(
        default=False , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def can_convert_to_int(string ):
    """simple docstring"""
    try:
        int(string )
        return True
    except ValueError:
        return False
def can_convert_to_float(string ):
    """simple docstring"""
    try:
        float(string )
        return True
    except ValueError:
        return False
class Plot :
    def __init__( self : Any , args : Dict ) -> Dict:
        '''simple docstring'''
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='''''' ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row['''model''']
                self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
                if can_convert_to_int(row['''result'''] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['''batch_size'''] ), int(row['''sequence_length'''] ))
                    ] = int(row['''result'''] )
                elif can_convert_to_float(row['''result'''] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['''batch_size'''] ), int(row['''sequence_length'''] ))
                    ] = float(row['''result'''] )
    def plot( self : List[Any] ) -> List[Any]:
        '''simple docstring'''
        fig , ax = plt.subplots()
        title_str = '''Time usage''' if self.args.is_time else '''Memory usage'''
        title_str = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('''log''' )
            ax.set_yscale('''log''' )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
            results = self.result_dict[model_name]['''result''']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
                plt.plot(x_axis_array , y_axis_array , '''--''' )
                title_str += F" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = '''Time in s''' if self.args.is_time else '''Memory in MB'''
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main( ):
    """simple docstring"""
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
plot.plot()
if __name__ == "__main__":
main()
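# Hypothetical invocation (the csv filename is illustrative; the file must provide the
# model, batch_size, sequence_length and result columns parsed above):
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file plot.png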
| 51
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class PixaStructTextConfig( PretrainedConfig ):
    model_type = '''pix2struct_text_model'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self ,vocab_size=5_02_44 ,hidden_size=7_68 ,d_kv=64 ,d_ff=20_48 ,num_layers=12 ,num_heads=12 ,relative_attention_num_buckets=32 ,relative_attention_max_distance=1_28 ,dropout_rate=0.1 ,layer_norm_epsilon=1E-6 ,initializer_factor=1.0 ,dense_act_fn="gelu_new" ,decoder_start_token_id=0 ,use_cache=False ,pad_token_id=0 ,eos_token_id=1 ,tie_word_embeddings=False ,is_decoder=True ,**kwargs ,) -> List[str]:
"""simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id ,eos_token_id=eos_token_id ,decoder_start_token_id=decoder_start_token_id ,tie_word_embeddings=tie_word_embeddings ,is_decoder=is_decoder ,**kwargs ,)
@classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict ,kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''' ) == "pix2struct":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class PixaStructVisionConfig( PretrainedConfig ):
    model_type = '''pix2struct_vision_model'''
    def __init__( self ,hidden_size=7_68 ,patch_embed_hidden_size=7_68 ,d_ff=20_48 ,d_kv=64 ,num_hidden_layers=12 ,num_attention_heads=12 ,dense_act_fn="gelu_new" ,layer_norm_eps=1E-6 ,dropout_rate=0.0 ,attention_dropout=0.0 ,initializer_range=1E-10 ,initializer_factor=1.0 ,seq_len=40_96 ,relative_attention_num_buckets=32 ,relative_attention_max_distance=1_28 ,**kwargs ,) -> int:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
@classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict ,kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''' ) == "pix2struct":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class PixaStructConfig( PretrainedConfig ):
    model_type = '''pix2struct'''
    is_composition = True
    def __init__( self ,text_config=None ,vision_config=None ,initializer_factor=1.0 ,initializer_range=0.0_2 ,is_vqa=False ,tie_word_embeddings=False ,is_encoder_decoder=True ,**kwargs ,) -> Optional[int]:
        """simple docstring"""
        super().__init__(tie_word_embeddings=tie_word_embeddings ,is_encoder_decoder=is_encoder_decoder ,**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
@classmethod
    def from_text_vision_configs( cls ,text_config ,vision_config ,**kwargs ) -> int:
        """simple docstring"""
        return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**kwargs )
    def to_dict( self ) -> Any:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
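# Usage sketch (illustrative values only; runnable once this module is importable from
# within transformers, since it relies on the relative imports above):
#   text_cfg = PixaStructTextConfig(vocab_size=5_02_44)
#   vision_cfg = PixaStructVisionConfig(hidden_size=7_68)
#   composite = PixaStructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   composite.to_dict()["text_config"]["vocab_size"]  # -> 50244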
| 191
|
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}')
| 191
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    """simple docstring"""
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('''The nodes number should be same as the number of coins''' )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
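    # Worked example (hypothetical tree, not from the original tests): the left child
    # holds all three coins, so two moves push its surplus up to the root and one move
    # passes a coin down to the right child.
    assert distribute_coins(TreeNode(0 , TreeNode(3 ) , TreeNode(0 ) ) ) == 3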
| 352
|
"""simple docstring"""
def binary_count_setbits(a: int ):
    '''simple docstring'''
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(a , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(a ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
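    # Quick check (illustrative): 25 == 0b11001, which has three set bits.
    assert binary_count_setbits(25 ) == 3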
| 326
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
a =logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
@dataclass
class DataTrainingArguments :
    train_file: Optional[str] = field(default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def __post_init__( self ):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice :
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
        # Un-flatten
        batch = {k: v.view(batch_size ,num_choices ,-1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels ,dtype=torch.int64)
        return batch
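# Shape sketch (illustrative numbers): with a batch of 2 examples and 4 candidate
# endings each, the collator pads the 8 flattened sequences together, views the result
# back to (2, 4, seq_len), and re-attaches the popped labels as a (2,) int64 tensor.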
def main() -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__lowerCamelCase : Optional[Any] = [F"ending{i}" for i in range(4 )]
__lowerCamelCase : int = 'sent1'
__lowerCamelCase : Optional[Any] = 'sent2'
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
            max_seq_length = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ) -> Dict:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
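# Hypothetical invocation (model, output directory and hyperparameters are illustrative
# placeholders, not values prescribed by this script):
#   python run_swag.py --model_name_or_path bert-base-uncased --output_dir ./swag_out \
#       --do_train --do_eval --per_device_train_batch_size 16 --learning_rate 5e-5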
| 73
|
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self : Tuple ):
return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self : int ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self : List[str] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16( self : Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self : int ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self : Union[str, Any] ):
self._test_save_load_local()
    def test_inference_batch_single_identical( self : List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
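# To run just these fast tests locally (the path follows the usual diffusers layout and
# may differ in your checkout):
#   pytest tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py -k "FastTests"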
| 177
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=3_05_22, type=int)
    args = parser.parse_args()
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
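# Hypothetical invocation (the script name is illustrative; the pickle paths match the
# argparse defaults above):
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522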
| 177
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
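# Note: the assignment above swaps this module for a _LazyModule proxy in sys.modules,
# so submodules such as modeling_encodec are only imported on first attribute access
# (e.g. `from transformers.models.encodec import EncodecModel`).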
| 21
|
def logical_left_shift(number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int , shift_amount: int ) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = """0""" + str(bin(number ) ).strip("""-""" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            """1""" + """0""" * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
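    # Quick checks (illustrative): 12 == 0b1100.
    assert logical_left_shift(12 , 2 ) == "0b110000"
    assert logical_right_shift(12 , 2 ) == "0b11"
    assert arithmetic_right_shift(-8 , 2 ) == "0b11110"  # the sign bit is replicated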
| 124
| 0
|
def _modexpt(base: int , exponent: int , modulo_value: int ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution(base: int = 1_777 , height: int = 1_855 , digits: int = 8 ) -> int:
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
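# The loop in solution() evaluates the tetration base↑↑height bottom-up, reducing every
# intermediate power modulo 10**digits so only the trailing digits are ever kept.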
| 366
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList( HashTable ):
    """simple docstring"""
    def __init__( self , *args , **kwargs )->str:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
    def _set_value( self , key , data )->List[Any]:
        '''simple docstring'''
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self )->List[Any]:
        '''simple docstring'''
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self , key , data=None )->Any:
        '''simple docstring'''
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
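# Usage sketch (assumes the constructor and insert_data API of the HashTable base class
# in the sibling hash_table module; these names are an assumption, not verified here):
#   ht = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (17, 18, 99):
#       ht.insert_data(value)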
| 65
| 0
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(input_file: str = "p059_cipher.txt" ) -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__ ).parent.joinpath(input_file ).read_text(encoding="""utf-8""" )
    ciphertext = [int(number ) for number in data.strip().split(""",""" )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
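    # Mini-check (illustrative, not from the original problem data): XOR-ing with the
    # same key twice is the identity, so try_key recovers the plaintext (97 == ord("a")).
    assert try_key([ord("h" ) ^ 97, ord("i" ) ^ 97] , (97, 97) ) == "hi"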
| 41
|
'''simple docstring'''
def greatest_common_divisor(a: int , b: int ) -> int:
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative(x: int , y: int ) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
def main() -> Tuple:
    try:
        nums = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
        num_1 = int(nums[0] )
        num_2 = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_1}, {num_2}) = '''
            f'''{greatest_common_divisor(num_1 , num_2 )}''' )
        print(f'''By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2 )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print("""Wrong input""" )
if __name__ == "__main__":
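    # Quick self-checks (illustrative values) before the interactive prompt:
    assert greatest_common_divisor(121 , 11 ) == 11
    assert gcd_by_iterative(24 , 40 ) == 8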
main()
| 41
| 1
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image :
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests ( unittest.TestCase ):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ) -> Any:
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test( self , object_detector , examples ) -> List[Any]:
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
        self.assertGreater(len(outputs ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    "score": ANY(float ),
                    "label": ANY(str ),
                    "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                } , )
        import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch , threshold=0.0 )
        self.assertEqual(len(batch_outputs ) , len(batch ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        "score": ANY(float ),
                        "label": ANY(str ),
                        "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                    } , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
    def test_small_model_tf( self ) -> Union[str, Any]:
pass
@require_torch
    def test_small_model_pt( self ) -> Dict:
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt( self ) -> Optional[int]:
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection( self ) -> Tuple:
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection" , model=model_id )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
    def test_threshold( self ) -> Union[str, Any]:
        threshold = 0.9_9_8_5
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection" , model=model_id )
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm( self ) -> Dict:
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9_9_9_3
        object_detector = pipeline("object-detection" , model=model_id , threshold=threshold )
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
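# Minimal usage sketch outside the test harness, mirroring the calls above:
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")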
| 47
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__A =True
except ImportError:
__A =False
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory( args ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser ) -> int:
        add_new_model_parser = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" , type=str , help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" , type=str , help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing , testing_file , path=None , *args ) -> List[str]:
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ) -> str:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(lowercase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , "r" ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
lowerCamelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f'{directory}/configuration.json' )
        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(lowercase , exist_ok=lowercase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=lowercase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , "w" ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("\"")[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("\"")[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory)
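# Example invocation (a sketch; the JSON path is illustrative and mirrors the
# test configurations shipped under `templates/adding_a_new_model`):
#
#   transformers-cli add-new-model --testing \
#       --testing_file=templates/adding_a_new_model/tests/encoder-bert-tokenizer.json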
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
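# Usage sketch: thanks to the lazy module above, the heavyweight PyTorch/TF
# submodules are only imported on first attribute access (the checkpoint name
# below is illustrative):
#
#   from transformers import LongformerModel
#   model = LongformerModel.from_pretrained("allenai/longformer-base-4096")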
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """
    Recursive Strassen multiplication; supports square matrices whose side
    length is a power of 2.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can map one placeholder token to several embedding vectors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
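# Usage sketch (the CLIP checkpoint below is illustrative; any CLIPTokenizer
# checkpoint works):
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer.encode("a photo of <cat-toy>", vector_shuffle=True)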
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
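# Example invocation (a sketch; paths are illustrative):
#
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-ls960 \
#       --not_finetuned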
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
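# Usage sketch (requires the `vision` extra; the image path is illustrative):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   print(tool(Image.open("photo.jpg")))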
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, so it suffices to
    # test divisors of that form up to sqrt(number)
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a gray image computed from an rgb image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image computed from a gray image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the dilation of a binary image by the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__UpperCamelCase = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
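# Usage sketch (dataset/index names are illustrative; `from_pretrained` comes
# from the `RagRetriever` base class):
#
#   retriever = RagPyTorchDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
#   )
#   retriever.init_retrieval(distributed_port=29501)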
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed")
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored.")
if __name__ == "__main__":
main()
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any model with a causal LM head."""

    # Prefix text to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation; the number of desired tokens exceeds the"
                        " model's max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
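# Usage sketch (the checkpoint is illustrative; any causal LM on the Hub works):
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)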
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
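# Usage sketch (assumes an ANSI-capable terminal):
#
#   linebreak()              # draw a full-width separator
#   forceWrite("working...")
#   clear_line()             # wipe the current line again
#   move_cursor(1, "UP")     # jump back up one row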
def solution(limit: int = 1000000) -> int:
    """
    Sums Euler's totient function phi(n) for 2 <= n <= limit with a sieve,
    which equals the number of reduced proper fractions with denominator <= limit.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so apply the factor (1 - 1/i) to its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """
    Returns the Liouville lambda of `number`: 1 if the count of prime factors
    (with multiplicity) is even, -1 if it is odd.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
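    # Example values (sketch): liouville_lambda(1) == 1 (empty factorization),
    # liouville_lambda(2) == -1, liouville_lambda(4) == 1 (2 * 2, an even count).
    print(liouville_lambda(10))  # 1, since 10 = 2 * 5 has two prime factors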
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
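# Usage sketch: a deliberately tiny config for smoke tests (the sizes are
# illustrative; the defaults above correspond to the 7B model):
#
#   config = LlamaConfig(
#       vocab_size=1000, hidden_size=64, intermediate_size=256,
#       num_hidden_layers=2, num_attention_heads=4,
#   )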
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak the ParlAI model's weights into the transformers design.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
)
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
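# Usage sketch (with the names restored above; the printed repr is illustrative for the
# default task, not a guaranteed output):
# onnx_config = DistilBertOnnxConfig(DistilBertConfig())
# print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])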
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
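# Sketch of the effect: every QuantLinear instantiated after this call fake-quantizes
# activations at args.aprec bits and weights at args.wprec bits (per output channel on
# axis 0 unless --quant-per-tensor was given).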
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections
    are implemented as a single GEMM, by forcing Q, K and V to share an amax."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where the module name matches a pattern in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
"""simple docstring"""
from math import ceil, sqrt
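# A hollow square lamina with outer width o and hole width h (same parity, o - h >= 2)
# uses o**2 - h**2 tiles; solution(limit) counts, over all outer widths, the laminae
# that need at most `limit` tiles. E.g. o = 3 admits only h = 1, using 9 - 1 = 8 tiles.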
def solution(limit: int = 1_000_000) -> int:
    """Count the hollow square laminae that can be formed with up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
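# Usage sketch: both parties of the exchange derive the same shared secret.
# alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
# assert alice.generate_shared_key(bob.generate_public_key()) == \
#     bob.generate_shared_key(alice.generate_public_key())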
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
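# Usage sketch: this packaged builder is what backs the "pandas" loader for pickled
# DataFrames, e.g. datasets.load_dataset("pandas", data_files="frame.pkl").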
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculate the resonant frequency f = 1 / (2 * pi * sqrt(L * C)) of an LC circuit."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
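# Worked example (using the restored name above): an LC circuit with L = 10 H and
# C = 5 F resonates at 1 / (2 * pi * sqrt(50)) Hz, so
# round(resonant_frequency(10, 5)[1], 6) == 0.022508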
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
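# Sketch of the intended effect: importing this package stays cheap, and the torch-backed
# classes listed in _import_structure are only materialized on first attribute access.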
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources for each resource in the claim vector."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus the per-resource allocations."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Index control dictionary mapping each need row back to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm, executing safe processes until none remain."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        """Properly align display of the algorithm's solution."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
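# Usage sketch with the module-level test data above:
# BankersAlgorithm(test_claim_vector, test_allocated_res_table, test_maximum_claim_table).main(describe=True)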
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
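    # Example invocation (sketch; the script filename is illustrative):
    # python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
    #     --pytorch_dump_folder_path ./dump --transfo_xl_dataset_file ./corpus.pkl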
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    # Reconstructed serialized offsets; message names inferred from the schema above.
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
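# Hedged usage sketch (added, not compiler-generated): deserializing a trained
# SentencePiece model with the messages built above; the path is a placeholder.
#
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.model_prefix, len(m.pieces))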
| 302
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput ):
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL(ModelMixin , ConfigMixin ):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 4 , norm_num_groups: int = 32 , sample_size: int = 32 , scaling_factor: float = 0.18215 , ) -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing( self , module , value=False ) -> None:
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value
    def enable_tiling( self , use_tiling: bool = True ) -> None:
        self.use_tiling = use_tiling
    def disable_tiling( self ) -> None:
        self.enable_tiling(False )
    def enable_slicing( self ) -> None:
        self.use_slicing = True
    def disable_slicing( self ) -> None:
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ) -> Dict[str, AttentionProcessor]:
        processors = {}
        def fn_recursive_add_processors(name: str , module: torch.nn.Module , processors: Dict[str, AttentionProcessor] ):
            if hasattr(module , 'set_processor' ):
                processors[F"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"{name}.{sub_name}" , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> None:
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                F"A dict of processors was passed, but the number of processors {len(processor )} does not match the"
                F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
        def fn_recursive_attn_processor(name: str , module: torch.nn.Module , processor ):
            if hasattr(module , 'set_processor' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(F"{name}.processor" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"{name}.{sub_name}" , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ) -> None:
        self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    @apply_forward_hook
    def decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v( self , a: torch.Tensor , b: torch.Tensor , blend_extent: int ) -> torch.Tensor:
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h( self , a: torch.Tensor , b: torch.Tensor , blend_extent: int ) -> torch.Tensor:
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def tiled_decode( self , z: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , sample_posterior: bool = False , return_dict: bool = True , generator: Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
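# Hedged usage sketch (added): round-tripping an image through the VAE with
# tiling enabled so large inputs are processed tile by tile (sizes illustrative).
#
#   vae = AutoencoderKL(block_out_channels=(64,), latent_channels=4, sample_size=256)
#   vae.enable_tiling()
#   image = torch.randn(1, 3, 512, 512)
#   posterior = vae.encode(image).latent_dist
#   recon = vae.decode(posterior.sample()).sample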
| 47
|
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCamelCase : Any = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCamelCase : Optional[Any] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCamelCase : Optional[Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric ):
    def _info( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ] , )
    def _download_and_prepare( self , dl_manager ):
        import nltk
        nltk.download('wordnet' )
        if NLTK_VERSION >= version.Version('3.6.5' ):
            nltk.download('punkt' )
        if NLTK_VERSION >= version.Version('3.6.6' ):
            nltk.download('omw-1.4' )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        if NLTK_VERSION >= version.Version('3.6.5' ):
            # newer nltk can tokenize the inputs itself
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 47
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    't5-small': 512,
    't5-base': 512,
    't5-large': 512,
    't5-3b': 512,
    't5-11b': 512,
}
SPIECE_UNDERLINE = '▁'
class T5Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , legacy=True , **kwargs , ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"<extra_id_{i}>" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        if legacy:
            logger.warning_once(
                F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy=legacy , **kwargs , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    F" {pretrained_model_name_or_path} automatically truncating your input to"
                    F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , FutureWarning , )
        return max_model_length
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda token: bool(re.search(R"<extra_id_\d+>" , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids: List[int] ) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self , text: "TextInput" , **kwargs ) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , " " )
        return super().tokenize(text , **kwargs )
    def _tokenize( self , text , **kwargs ):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id( self , token ):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_" ):
            match = re.match(R"<extra_id_(\d+)>" , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = F"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string( self , tokens ):
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
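# Hedged usage sketch (added): round-tripping text through the slow tokenizer;
# requires a local `spiece.model` or a hub checkpoint such as "t5-small".
#
#   tok = T5Tokenizer.from_pretrained("t5-small")
#   ids = tok("Translate English to German: hello")["input_ids"]
#   print(tok.convert_ids_to_tokens(ids))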
| 350
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__( self , value: int ) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    def __init__( self , tree: Node ) -> None:
        self.tree = tree
    def depth_first_search( self , node: Node | None ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
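    # Hedged demo (added): sum a small hand-built tree via the iterator protocol.
    root = Node(10 )
    root.left = Node(5 )
    root.right = Node(-3 )
    print(next(iter(BinaryTreeNodeSum(root ) ) ) )  # 10 + 5 - 3 = 12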
| 206
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_small_integration_test( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='tf' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 27
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        # BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname , 'bart_tokenizer' )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config( self ):
        save_dir = os.path.join(self.tmpdirname , 'rag_tokenizer' )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 212
| 0
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op , got_ver , want_ver , requirement , pkg , hint ):
    if got_ver is None or want_ver is None:
        raise ValueError(
            F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            F" reinstalling {pkg}." )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def require_version(requirement: str , hint: Optional[str] = None ) -> None:
    """Perform a runtime check of a dependency version, using the exact same syntax used by pip."""
    hint = F"\n{hint}" if hint is not None else """"""
    # non-versioned check
    if re.match(R"""^[\w_\-\d]+$""" , requirement ):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , requirement )
        if not match:
            raise ValueError(
                """requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"""
                F" got {requirement}" )
        pkg, want_full = match[0]
        want_range = want_full.split(""",""" )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , w )
            if not match:
                raise ValueError(
                    """requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"""
                    F" but got {requirement}" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
    # special case
    if pkg == "python":
        got_ver = """.""".join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F"The '{requirement}' distribution was not found and is required by this application. {hint}" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core(requirement ):
    """require_version wrapper which emits a core-install hint on failure."""
    hint = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
    return require_version(requirement , hint )
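# Hedged usage sketch (added): guarding code on dependency ranges; the pins below
# are illustrative, not this library's actual requirements.
#
#   require_version("numpy>=1.21,<2.0", "Try: pip install -U numpy")
#   require_version("python>=3.8")
#   require_version_core("datasets>=1.8.0")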
| 367
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook( self , m , inputs: Tensor , outputs: Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x: Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    def __call__( self , x: Tensor ):
        """Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input."""
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m: type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m: type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced )} operations while"
                f" destination module has {len(dest_traced )}." )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}" )
def convert_weight_and_push(name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
    print(F"Converting {name}..." )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F"resnet{'-'.join(name.split('resnet' ) )}"
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=True , )
        print(F"Pushed {checkpoint_name}" )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
    }
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 132
| 0
|
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray ) -> np.ndarray:
    """Return the luminance of an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray ) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray , kernel: np.ndarray ) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
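# Hedged worked example (added): a single set pixel dilated with a cross-shaped
# structuring element grows into a plus sign.
#
#   img = np.zeros((3, 3)); img[1, 1] = 1
#   dilation(img, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#   # -> ones at (0,1), (1,0), (1,1), (1,2), (2,1); zeros in the corners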
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("""RGB""")
    pil_img.save("""result_dilation.png""")
| 13
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == "fp8") , )
    return train_dataloader, eval_dataloader
def training_function(config , args ):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , eval_metric )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
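# Hedged usage note (added): this script is meant to be run through the accelerate
# CLI; the filename below is an assumption for whatever this file is saved as.
#
#   accelerate config                                        # one-time interactive setup
#   accelerate launch nlp_example.py --mixed_precision fp16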
| 13
| 1
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # learned unconditional embedding, used for classifier-free guidance scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ] )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
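# Toy stand-in illustrating the learned-unconditional-embedding pattern above: a
# single trainable vector doubles as the "empty" conditioning for classifier-free
# guidance. The sizes and shapes here are assumptions for illustration only.
import torch
from torch import nn

class TinyCondEncoder(nn.Module):
    def __init__(self, hidden_size=32, proj_size=16):
        super().__init__()
        self.proj_out = nn.Linear(hidden_size, proj_size)
        self.uncond_vector = nn.Parameter(torch.randn(1, 1, proj_size))

    def forward(self, pooled, return_uncond_vector=False):
        latent = self.proj_out(pooled[:, None])  # (batch, 1, proj_size)
        if return_uncond_vector:
            return latent, self.uncond_vector
        return latent

enc = TinyCondEncoder()
cond, uncond = enc(torch.randn(2, 32), return_uncond_vector=True)
print(cond.shape, uncond.shape)  # torch.Size([2, 1, 16]) torch.Size([1, 1, 16])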
| 370
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 101_122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)
@require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='moussaKam/mbarthez', revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6', sequences=sequences, )
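# The slow/fast parity pattern exercised above, in isolation. This sketch uses a
# generic checkpoint ("bert-base-uncased" is only an example) and needs network
# access to download it.
from transformers import AutoTokenizer

slow = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
fast = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)

text = "I was born in 92000, and this is falsé."
assert slow.tokenize(text) == fast.tokenize(text)
assert slow.encode(text, add_special_tokens=False) == fast.encode(text, add_special_tokens=False)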
| 122
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
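# Quick check of the sequence-length arithmetic the tester encodes: text tokens
# plus image patches plus one CLS token. With the integration test's two text
# tokens this reproduces the (1, 199, 768) hidden-state shape asserted above.
image_size, patch_size, text_seq_length = 224, 16, 2
image_seq_length = (image_size // patch_size) ** 2 + 1  # 197, incl. the CLS token
print(text_seq_length + image_seq_length)  # 199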
| 338
|
def binary_recursive(num) -> str:
    if isinstance(num, float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''')
    if isinstance(num, str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''')
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
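# Example calls for the converter above (outputs shown in comments):
print(binary_recursive(0))    # 0b0
print(binary_recursive(40))   # 0b101000
print(binary_recursive(-40))  # -0b101000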
| 338
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
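# Hedged usage sketch of the tool under test; running it requires the
# transformers agents extras and downloads the underlying pipeline model.
from transformers import load_tool

classifier = load_tool("text-classification")
classifier.setup()
print(classifier("That's quite cool", ["positive", "negative"]))  # "positive"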
| 241
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs), {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            }, )
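# Toy illustration of the label shifting the fairseq-parity tests rely on: MBart
# moves the trailing language code to the front of the decoder inputs. The token
# values here are made up apart from the RO_CODE language id.
import torch
from transformers.models.mbart.modeling_mbart import shift_tokens_right

labels = torch.tensor([[9, 8, 7, 2, 250020]])  # tokens, EOS, then language code
decoder_input_ids = shift_tokens_right(labels, 1)  # pad_token_id=1
print(decoder_input_ids)  # tensor([[250020, 9, 8, 7, 2]])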
| 241
| 1
|
'''simple docstring'''
def solution(n: int = 10) -> str:
    # Return the last `n` digits of the non-Mersenne prime 28433 * 2^7830457 + 1.
    if not isinstance(n, int) or n < 0:
        raise ValueError("""Invalid input""")
    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }")
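# The key trick is three-argument pow(), which computes the modular power without
# materialising the ~2.36-million-digit full number. A tiny analogue:
print(pow(2, 10, 10**3))  # 24 -- the last three digits of 1024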
| 35
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
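# Hedged usage sketch: instantiate the config with its defaults and inspect a
# couple of fields (requires transformers to be installed).
from transformers import CamembertConfig

config = CamembertConfig()
print(config.model_type, config.hidden_size)  # camembert 768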
| 116
| 0
|
"""simple docstring"""
A_ : Union[str, Any] ="""
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 80
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """moussaKam/mbarthez""": 1024,
    """moussaKam/barthez""": 1024,
    """moussaKam/barthez-orangesum-title""": 1024,
}
A_ : Union[str, Any] ="""▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
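# Toy check of the pair-sequence layout built above: <s> A </s></s> B </s>,
# with illustrative token ids.
cls_id, sep_id = 0, 2
token_ids_a, token_ids_b = [10, 11], [20]
pair = [cls_id] + token_ids_a + [sep_id] + [sep_id] + token_ids_b + [sep_id]
print(pair)  # [0, 10, 11, 2, 2, 20, 2]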
| 80
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'''a''': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'''a''': 2, '''b''': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'''a''': 2, '''c''': 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        # Check that `GradScalerKwargs` values are passed through to the created scaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
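# Simplified re-implementation of the `to_kwargs` idea being tested above: emit
# only the fields that differ from their dataclass defaults. The real
# KwargsHandler compares against a freshly constructed instance; this sketch
# compares against the declared defaults, which is equivalent for simple fields.
from dataclasses import dataclass, fields

@dataclass
class Mock:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        return {f.name: getattr(self, f.name) for f in fields(self) if getattr(self, f.name) != f.default}

print(Mock().to_kwargs())             # {}
print(Mock(a=2, b=True).to_kwargs())  # {'a': 2, 'b': True}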
| 198
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class KnapsackTestCase(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
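# A reference bottom-up 0/1 knapsack matching the expected values above, e.g.
# capacity 50 with weights [10, 20, 30] and values [60, 100, 120] yields 220.
def knapsack_dp(capacity, weights, values, n):
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(capacity + 1):
            dp[i][w] = dp[i - 1][w]
            if weights[i - 1] <= w:
                dp[i][w] = max(dp[i][w], values[i - 1] + dp[i - 1][w - weights[i - 1]])
    return dp[n][capacity]

print(knapsack_dp(50, [10, 20, 30], [60, 100, 120], 3))  # 220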
| 198
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
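# Simplified sketch of the lazy-import mechanism these __init__ files rely on:
# attribute access triggers the real submodule import, so importing the package
# stays cheap. This is an illustrative re-implementation, not the library code.
import importlib
import types

class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value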
| 366
|
UpperCAmelCase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
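# Hedged quick-start against the public API re-exported above (requires
# accelerate to be installed; device and precision output depend on the machine).
from accelerate import Accelerator

accelerator = Accelerator()
print(accelerator.device, accelerator.mixed_precision)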
| 26
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
blip_test = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test)
        expected_bert_mapping = {'''BertModelTest''': '''BertModelTester'''}
        expected_blip_mapping = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test)
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test)
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
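# Toy check of the naming convention these mappings encode: a test class maps to
# its tester by swapping the suffix. The helper below is illustrative only.
def test_to_tester(name: str) -> str:
    assert name.endswith("Test")
    return name[: -len("Test")] + "Tester"

print(test_to_tester("BertModelTest"))  # BertModelTester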
| 3
| 1
|
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph over `vertices_number` nodes, adding each edge
    independently with the given probability."""
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater than or equal to 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is less than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with `vertices_number` nodes."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
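# Example calls for the generators above; seeding makes the random graph
# reproducible.
import random

random.seed(1)
print(random_graph(4, 0.5))  # undirected by default, so edges appear in both lists
print(complete_graph(3))     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}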
| 353
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
A__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
def __A ( self ):
A__ = "cpu"
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_dummy_inputs(UpperCAmelCase__ )
A__ = pipe(**UpperCAmelCase__ ).images
A__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
A__ = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1e-3 )
def __A ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __A ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __A ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __A ( self ):
super().test_save_load_local(expected_max_difference=5e-4 )
def __A ( self ):
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class UpperCamelCase ( unittest.TestCase ):
@classmethod
def __A ( cls ):
super().setUpClass()
torch.use_deterministic_algorithms(True )
@classmethod
def __A ( cls ):
super().tearDownClass()
torch.use_deterministic_algorithms(False )
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ = torch.manual_seed(51 )
A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to("cuda" )
A__ = "a painting of an elephant with glasses"
A__ = [5, 7]
A__ = pipe(
prompt=UpperCAmelCase__ , token_indices=UpperCAmelCase__ , guidance_scale=7.5 , generator=UpperCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
assert np.abs((expected_image - image).max() ) < 5e-1
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = """src/transformers"""
# Matches is_xxx_available()
_re_backend = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_re_try = re.compile(r"""^\s*try:""")
# Catches a line with else:
_re_else = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
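# Illustrative example: for a guard line such as
#   "if not is_torch_available() and not is_tokenizers_available():"
# find_backend returns the sorted, joined key "tokenizers_and_torch".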
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
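# Illustrative example: if the "torch" backend lists ["FooModel"] in
# _import_structure but ["FooConfig", "FooModel"] under TYPE_CHECKING,
# analyze_results returns:
#   ["Differences for torch backend:",
#    "  FooConfig in TYPE_HINT but not in _import_structure."]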
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one is not consistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules():
    """Check that every transformers submodule is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE__ :Dict = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Optional[Any] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase : int = text_generator("This is a test" , do_sample=__a )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
_UpperCamelCase : List[Any] = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
__a , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
_UpperCamelCase : int = text_generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"generated_token_ids": ANY(__a )},
{"generated_token_ids": ANY(__a )},
] , )
_UpperCamelCase : int = text_generator.model.config.eos_token_id
_UpperCamelCase : int = "<pad>"
_UpperCamelCase : List[str] = text_generator(
["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"generated_token_ids": ANY(__a )},
{"generated_token_ids": ANY(__a )},
],
[
{"generated_token_ids": ANY(__a )},
{"generated_token_ids": ANY(__a )},
],
] , )
@require_tf
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase : Optional[int] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase : Dict = text_generator("This is a test" , do_sample=__a )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
_UpperCamelCase : Optional[int] = text_generator(["This is a test", "This is a second test"] , do_sample=__a )
self.assertEqual(
__a , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str] , __a : Tuple , __a : List[str] ) -> Dict:
_UpperCamelCase : Optional[Any] = TextGenerationPipeline(model=__a , tokenizer=__a )
return text_generator, ["This is a test", "Another test"]
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
_UpperCamelCase : Optional[int] = "Hello I believe in"
_UpperCamelCase : List[str] = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = text_generator(__a )
self.assertEqual(
__a , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
_UpperCamelCase : Union[str, Any] = text_generator(__a , stop_sequence=" fe" )
self.assertEqual(__a , [{"generated_text": "Hello I believe in fe"}] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : str , __a : Any ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = text_generator.model
_UpperCamelCase : Any = text_generator.tokenizer
_UpperCamelCase : Optional[Any] = text_generator("This is a test" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_UpperCamelCase : int = text_generator("This is a test" , return_full_text=__a )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_UpperCamelCase : Union[str, Any] = pipeline(task="text-generation" , model=__a , tokenizer=__a , return_full_text=__a )
_UpperCamelCase : int = text_generator("This is a test" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_UpperCamelCase : int = text_generator("This is a test" , return_full_text=__a )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_UpperCamelCase : Optional[int] = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase : Any = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
with self.assertRaises(__a ):
_UpperCamelCase : Optional[int] = text_generator("test" , return_full_text=__a , return_text=__a )
with self.assertRaises(__a ):
_UpperCamelCase : Tuple = text_generator("test" , return_full_text=__a , return_tensors=__a )
with self.assertRaises(__a ):
_UpperCamelCase : int = text_generator("test" , return_text=__a , return_tensors=__a )
# Empty prompt is slightly special:
# it requires a BOS token to exist.
# Special case for Pegasus, which will always append EOS, so it will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase : Union[str, Any] = text_generator("" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase : Union[str, Any] = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
_UpperCamelCase : Optional[int] = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__a ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
import torch
# Classic `model_kwargs`
_UpperCamelCase : Any = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase : List[Any] = pipe("This is a test" )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
_UpperCamelCase : int = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase : List[Any] = pipe("This is a test" )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase : Any = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase : Optional[Any] = pipe("This is a test" )
self.assertEqual(
__a , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
import torch
_UpperCamelCase : List[str] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
import torch
_UpperCamelCase : List[Any] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=__a , top_p=0.5 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
_UpperCamelCase : int = "Hello world"
_UpperCamelCase : Optional[Any] = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
_UpperCamelCase : Tuple = logging.get_logger("transformers.generation.tf_utils" )
else:
_UpperCamelCase : List[str] = logging.get_logger("transformers.generation.utils" )
_UpperCamelCase : Tuple = "Both `max_new_tokens`" # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__a ) as cl:
_UpperCamelCase : Any = text_generator(__a , max_length=10 , max_new_tokens=1 )
self.assertIn(__a , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__a ) as cl:
_UpperCamelCase : List[str] = text_generator(__a , max_new_tokens=1 )
self.assertNotIn(__a , cl.out )
with CaptureLogger(__a ) as cl:
_UpperCamelCase : Optional[Any] = text_generator(__a , max_length=10 )
self.assertNotIn(__a , cl.out )
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[int] = -1
_UpperCamelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Any = TextStreamer(__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Optional[int] = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Tuple = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Dict = -1
_UpperCamelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : List[str] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : Optional[int] = tokenizer.decode(greedy_ids[0] )
_UpperCamelCase : Tuple = TextIteratorStreamer(__a )
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : Optional[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
_UpperCamelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Union[str, Any] = -1
_UpperCamelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Union[str, Any] = model.generate(__a , max_new_tokens=10 , do_sample=__a )
_UpperCamelCase : str = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCamelCase : Optional[int] = TextStreamer(__a , skip_prompt=__a )
model.generate(__a , max_new_tokens=10 , do_sample=__a , streamer=__a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase : Tuple = cs.out[:-1]
self.assertEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__a )
_UpperCamelCase : int = -1
_UpperCamelCase : Any = torch.ones((1, 5) , device=__a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase : List[str] = TextStreamer(__a , skip_special_tokens=__a )
model.generate(__a , max_new_tokens=1 , do_sample=__a , streamer=__a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase : int = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase : int = tokenizer(__a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__a )
_UpperCamelCase : Optional[Any] = -1
_UpperCamelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__a )
_UpperCamelCase : Any = TextIteratorStreamer(__a , timeout=0.0_01 )
_UpperCamelCase : Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase : List[Any] = Thread(target=model.generate , kwargs=__a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__a ):
_UpperCamelCase : List[str] = ""
for new_text in streamer:
streamer_text += new_text
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num, via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("""Input must be a positive integer""")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p, starting from p * p, as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
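# Quick sanity checks (illustrative):
#   prime_sieve_eratosthenes(10)  ->  [2, 3, 5, 7]
#   prime_sieve_eratosthenes(2)   ->  [2]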
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_num = int(input("""Enter a positive integer: """).strip())
    print(prime_sieve_eratosthenes(user_num))
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = """swin"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
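# Sanity check (illustrative): with the default embed_dim=96 and four stages,
# SwinConfig().hidden_size == 96 * 2 ** 3 == 768.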
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1E-4
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence, start=None, end=None):
    """Sort `sequence` in place between indices `start` and `end` (both inclusive) using slowsort."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
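# Illustrative usage (sorts in place):
#   data = [5, 3, 1, 4, 2]
#   slowsort(data)
#   data  ->  [1, 2, 3, 4, 5]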
if __name__ == "__main__":
    from doctest import testmod
    testmod()
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model and copy `value` into the targeted parameter."""
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model):
    """Map every fairseq checkpoint weight onto the corresponding HF UniSpeechSat parameter."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""",)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(""".""")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Route a fairseq conv-layer weight into the HF feature extractor."""
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
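# Illustrative name parsing: for full_name "conv_layers.0.2.weight", the suffix
# "0.2.weight" gives layer_id=0 and type_id=2, so the value is routed to that
# conv layer's layer norm weight (when per-layer layer norms are in use).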
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq checkpoint's weights into the HF model and save it."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = """"""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int=13 , UpperCamelCase__ : Union[str, Any]=30 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : List[str]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : int=10 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : List[str]=0.6 , UpperCamelCase__ : Dict=None , ) -> str:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = mask_ratio
__magic_name__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__magic_name__ = (image_size // patch_size) ** 2
__magic_name__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowercase ( self : Dict ) -> Tuple:
"""simple docstring"""
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
__magic_name__ = TFViTMAEModel(config=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
__magic_name__ = TFViTMAEForPreTraining(UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , training=UpperCamelCase__ )
# expected sequence length = num_patches
__magic_name__ = (self.image_size // self.patch_size) ** 2
__magic_name__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = TFViTMAEForPreTraining(UpperCamelCase__ )
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(UpperCamelCase__ , training=UpperCamelCase__ )
__magic_name__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
((__magic_name__) , (__magic_name__) , (__magic_name__)) = config_and_inputs
__magic_name__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
'''simple docstring'''
a__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a__ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = TFViTMAEModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Optional[int] ) -> Any:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def _lowercase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
__magic_name__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__magic_name__ = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
__magic_name__ = outputs_dict[0].numpy()
__magic_name__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase__ : int ):
__magic_name__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase__ ):
__magic_name__ = v.numpy()
else:
__magic_name__ = np.array(UpperCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = prepare_numpy_arrays(UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
__magic_name__ = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ = tf.constant(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__magic_name__ = tf_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCamelCase__ , UpperCamelCase__ ),)
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase__ , """_keras_serializable""" , UpperCamelCase__ )
}
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ = tf.convert_to_tensor(UpperCamelCase__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
__magic_name__ = main_layer_class(UpperCamelCase__ )
__magic_name__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__magic_name__ = tf.keras.Model(UpperCamelCase__ , outputs=main_layer(UpperCamelCase__ ) )
__magic_name__ = model(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = os.path.join(UpperCamelCase__ , """keras_model.h5""" )
model.save(UpperCamelCase__ )
__magic_name__ = tf.keras.models.load_model(
UpperCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase__ , tf.keras.Model )
__magic_name__ = model(UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@slow
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ = outputs.last_hidden_state.numpy()
__magic_name__ = 0
else:
__magic_name__ = outputs.logits.numpy()
__magic_name__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
__magic_name__ = model_class.from_pretrained(UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ = after_outputs["""last_hidden_state"""].numpy()
__magic_name__ = 0
else:
__magic_name__ = after_outputs["""logits"""].numpy()
__magic_name__ = 0
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1E-5 )
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , noise=UpperCamelCase__ )
__magic_name__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase__ )
__magic_name__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__magic_name__ = model_class.from_config(model.config )
__magic_name__ = new_model(UpperCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
__magic_name__ = new_model(UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
@slow
def _lowercase ( self : Any ) -> str:
"""simple docstring"""
__magic_name__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCamelCase__ )
def a__ ( ):
'''simple docstring'''
__magic_name__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
np.random.seed(2 )
__magic_name__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__magic_name__ = ViTMAEConfig()
__magic_name__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__magic_name__ = np.random.uniform(size=(1, num_patches) )
# forward pass
__magic_name__ = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
# verify the logits
__magic_name__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
__magic_name__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
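# The expected logits shape checked above follows directly from the ViT-MAE
# geometry: 196 = (224 / 16) ** 2 patches, and 768 = 16 * 16 * 3 reconstructed
# pixel values per patch. A quick standalone check of that arithmetic:
image_size, patch_size, channels = 224, 16, 3
num_patches = (image_size // patch_size) ** 2
pixels_per_patch = patch_size * patch_size * channels
assert (num_patches, pixels_per_patch) == (196, 768)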
| 88
|
"""simple docstring"""
from maths.prime_factors import prime_factors
def __magic_name__ ( lowercase ):
if not isinstance(lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str = f'''Input value of [number={number}] must be an integer'''
raise TypeError(lowercase )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(lowercase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
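# The function above returns the parity of a number's prime factorization
# counted with multiplicity, i.e. the Liouville function lambda(n). A minimal
# self-contained sketch of the same idea, assuming no access to
# maths.prime_factors (trial division instead):
def liouville(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError("Input must be a positive integer")
    count, n, p = 0, number, 2
    while p * p <= n:
        while n % p == 0:  # count each prime factor with multiplicity
            count += 1
            n //= p
        p += 1
    if n > 1:  # one leftover prime factor
        count += 1
    return -1 if count % 2 else 1

assert [liouville(n) for n in (1, 2, 3, 4)] == [1, -1, -1, 1]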
| 173
| 0
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = "linear"
lowerCAmelCase_ = "cosine"
lowerCAmelCase_ = "cosine_with_restarts"
lowerCAmelCase_ = "polynomial"
lowerCAmelCase_ = "constant"
lowerCAmelCase_ = "constant_with_warmup"
lowerCAmelCase_ = "piecewise_constant"
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 ):
return LambdaLR(_SCREAMING_SNAKE_CASE , lambda _SCREAMING_SNAKE_CASE : 1 , last_epoch=_SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 ):
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1.0 , _SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 ):
_snake_case = {}
_snake_case = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
_snake_case, _snake_case = rule_str.split(""":""" )
_snake_case = int(_SCREAMING_SNAKE_CASE )
_snake_case = float(_SCREAMING_SNAKE_CASE )
_snake_case = value
_snake_case = float(rule_list[-1] )
def create_rules_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
def rule_func(_SCREAMING_SNAKE_CASE ) -> float:
_snake_case = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_snake_case = create_rules_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
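# For reference, the step_rules string parsed above encodes "multiplier:boundary"
# pairs plus a trailing final multiplier. A standalone sketch of how such a rule
# string maps a step to its LR multiplier (a re-implementation for illustration,
# not the library call itself):
def piecewise_multiplier(step_rules: str, step: int) -> float:
    rules = step_rules.split(",")
    table = {}
    for rule in rules[:-1]:
        value, boundary = rule.split(":")
        table[int(boundary)] = float(value)
    for boundary in sorted(table):
        if step < boundary:
            return table[boundary]
    return float(rules[-1])  # multiplier applied after the last boundary

# "1:10,0.1:20,0.01" -> x1 before step 10, x0.1 before step 20, x0.01 afterwards
assert piecewise_multiplier("1:10,0.1:20,0.01", 5) == 1.0
assert piecewise_multiplier("1:10,0.1:20,0.01", 15) == 0.1
assert piecewise_multiplier("1:10,0.1:20,0.01", 99) == 0.01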
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=-1 ):
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.5 , _SCREAMING_SNAKE_CASE = -1 ):
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
_snake_case = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
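# With the default num_cycles = 0.5, the cosine factor above reduces to
# 0.5 * (1 + cos(pi * progress)): a smooth decay from 1 to 0 after warmup.
# A quick numeric check (standalone sketch of the same lambda):
import math

def cosine_factor(step, warmup, total, num_cycles=0.5):
    if step < warmup:
        return step / max(1, warmup)  # linear warmup
    progress = (step - warmup) / max(1, total - warmup)
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * num_cycles * 2.0 * progress)))

assert cosine_factor(0, 10, 110) == 0.0              # warmup start
assert cosine_factor(10, 10, 110) == 1.0             # warmup done, cos(0) -> 1
assert abs(cosine_factor(60, 10, 110) - 0.5) < 1e-9  # halfway, cos(pi/2) -> 0
assert abs(cosine_factor(110, 10, 110)) < 1e-9       # end, cos(pi) -> -1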
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = -1 ):
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
_snake_case = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1E-7 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=-1 ):
_snake_case = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_snake_case = lr_init - lr_end
_snake_case = num_training_steps - num_warmup_steps
_snake_case = 1 - (current_step - num_warmup_steps) / decay_steps
_snake_case = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
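# Sanity check of the polynomial-decay branch above: right after warmup the
# multiplier is 1 (full lr_init), and once the training steps are exhausted it
# pins to lr_end / lr_init, since LambdaLR multiplies the result by lr_init.
# Standalone numeric sketch:
def poly_multiplier(step, warmup, total, lr_init=1e-3, lr_end=1e-7, power=1.0):
    if step < warmup:
        return step / max(1, warmup)
    if step > total:
        return lr_end / lr_init
    pct_remaining = 1 - (step - warmup) / (total - warmup)
    decay = (lr_init - lr_end) * pct_remaining**power + lr_end
    return decay / lr_init

assert abs(poly_multiplier(10, 10, 110) - 1.0) < 1e-12    # warmup just ended
assert abs(poly_multiplier(110, 10, 110) - 1e-4) < 1e-12  # floor lr_end/lr_init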
__lowerCAmelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = -1 , ):
_snake_case = SchedulerType(_SCREAMING_SNAKE_CASE )
_snake_case = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_SCREAMING_SNAKE_CASE , step_rules=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , num_cycles=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , power=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE , )
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
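# A minimal usage sketch for the dispatcher above. Assuming this file is
# diffusers' optimization module (whose public name for the dispatcher is
# get_scheduler; the local names are elided here), any torch optimizer works:
import torch
from diffusers.optimization import get_scheduler

optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
# "linear" requires both warmup and total training steps, per the checks above.
scheduler = get_scheduler(
    "linear", optimizer, num_warmup_steps=100, num_training_steps=1_000
)
for _ in range(5):
    optimizer.step()
    scheduler.step()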
| 270
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(_SCREAMING_SNAKE_CASE , max_perimeter + 1 ):
_snake_case = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(_SCREAMING_SNAKE_CASE ):
_snake_case = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE = 1000 ):
_snake_case = pythagorean_triple(_SCREAMING_SNAKE_CASE )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
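# This is Project Euler problem 39: for p = 120 there are exactly three right
# triangles ({20, 48, 52}, {24, 45, 51}, {30, 40, 50}). A brute-force
# cross-check of that single perimeter (standalone sketch):
def count_triangles(perimeter: int) -> int:
    count = 0
    for a in range(1, perimeter // 3 + 1):
        for b in range(a, (perimeter - a) // 2 + 1):
            c = perimeter - a - b
            if a * a + b * b == c * c:  # right triangle with a <= b <= c
                count += 1
    return count

assert count_triangles(120) == 3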
| 270
| 1
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = 1
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[Any] = (32, 32)
lowerCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
def extract(*snake_case__ , **snake_case__ ):
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : int = torch.ones([0] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.pixel_values.to(snake_case__ )
return self
return Out()
return extract
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Dict = self.dummy_cond_unet
lowerCAmelCase : List[str] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
lowerCAmelCase : Union[str, Any] = self.dummy_vae
lowerCAmelCase : List[Any] = self.dummy_text_encoder
lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase : Any = StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase : List[str] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCAmelCase : int = sd_pipe([prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase : str = output.images
lowerCAmelCase : List[str] = torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCAmelCase : Optional[int] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=snake_case__ , )[0]
lowerCAmelCase : Any = image[0, -3:, -3:, -1]
lowerCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Any = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Optional[Any] = self.dummy_cond_unet
lowerCAmelCase : Any = PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCAmelCase : Union[str, Any] = self.dummy_vae
lowerCAmelCase : str = self.dummy_text_encoder
lowerCAmelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase : Union[str, Any] = StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase : Optional[int] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase : List[str] = torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCAmelCase : str = sd_pipe([prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase : Union[str, Any] = output.images
lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(0 )
lowerCAmelCase : Tuple = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=snake_case__ , )[0]
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : List[Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=snake_case__ )
assert isinstance(snake_case__ , snake_case__ )
assert isinstance(pipe.scheduler , snake_case__ )
assert pipe.safety_checker is None
lowerCAmelCase : Dict = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
lowerCAmelCase : Dict = StableDiffusionPipeline.from_pretrained(snake_case__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase : str = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.dummy_cond_unet
lowerCAmelCase : Dict = PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCAmelCase : str = self.dummy_vae
lowerCAmelCase : List[Any] = self.dummy_text_encoder
lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
lowerCAmelCase : str = unet.half()
lowerCAmelCase : Union[str, Any] = vae.half()
lowerCAmelCase : Any = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase : str = StableDiffusionPipeline(
unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase : Optional[Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : List[Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase : Any = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=snake_case__ )
lowerCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase : Tuple = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Dict = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
lowerCAmelCase : Any = 4_003_660_346
lowerCAmelCase : List[str] = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCAmelCase : Dict = torch.manual_seed(snake_case__ )
lowerCAmelCase : str = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase : Optional[int] = output.images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase : str = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
lowerCAmelCase : int = torch.manual_seed(snake_case__ )
lowerCAmelCase : Optional[int] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase : Optional[Any] = output.images
lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase : Tuple = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=snake_case__ )
lowerCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase : Optional[Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : str = "padme amidala taking a bath artwork, safe for work, no nudity"
lowerCAmelCase : Tuple = 2_734_971_755
lowerCAmelCase : Dict = 7
lowerCAmelCase : str = torch.manual_seed(snake_case__ )
lowerCAmelCase : List[str] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase : List[Any] = output.images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase : Any = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Union[str, Any] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase : List[Any] = output.images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase : Optional[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
lowerCAmelCase : Union[str, Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
lowerCAmelCase : Dict = 1_044_355_234
lowerCAmelCase : Any = 12
lowerCAmelCase : Tuple = torch.manual_seed(snake_case__ )
lowerCAmelCase : Optional[Any] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase : int = output.images
lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Optional[Any] = sd_pipe(
[prompt] , generator=snake_case__ , guidance_scale=snake_case__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase : Optional[int] = output.images
lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 108
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class a ( lowercase__ ):
"""simple docstring"""
a : List[str] = 'MCTCTFeatureExtractor'
a : str = 'AutoTokenizer'
def __init__( self : Tuple , __lowercase : int , __lowercase : Dict ) -> Any:
super().__init__(__lowercase , __lowercase )
__UpperCAmelCase : Optional[Any] = self.feature_extractor
__UpperCAmelCase : Optional[int] = False
def __call__( self : int , *__lowercase : Tuple , **__lowercase : Optional[int] ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowercase , **__lowercase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__UpperCAmelCase : Dict = kwargs.pop("""raw_speech""" )
else:
__UpperCAmelCase : Dict = kwargs.pop("""audio""" , __lowercase )
__UpperCAmelCase : List[str] = kwargs.pop("""sampling_rate""" , __lowercase )
__UpperCAmelCase : Tuple = kwargs.pop("""text""" , __lowercase )
if len(__lowercase ) > 0:
__UpperCAmelCase : Tuple = args[0]
__UpperCAmelCase : str = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__UpperCAmelCase : Tuple = self.feature_extractor(__lowercase , *__lowercase , sampling_rate=__lowercase , **__lowercase )
if text is not None:
__UpperCAmelCase : str = self.tokenizer(__lowercase , **__lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__UpperCAmelCase : Dict = encodings["""input_ids"""]
return inputs
def UpperCAmelCase ( self : Optional[Any] , *__lowercase : List[Any] , **__lowercase : int ) -> List[Any]:
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Optional[int] , *__lowercase : Optional[Any] , **__lowercase : List[str] ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__lowercase , **__lowercase )
__UpperCAmelCase : Optional[int] = kwargs.pop("""input_features""" , __lowercase )
__UpperCAmelCase : Optional[Any] = kwargs.pop("""labels""" , __lowercase )
if len(__lowercase ) > 0:
__UpperCAmelCase : Union[str, Any] = args[0]
__UpperCAmelCase : str = args[1:]
if input_features is not None:
__UpperCAmelCase : Any = self.feature_extractor.pad(__lowercase , *__lowercase , **__lowercase )
if labels is not None:
__UpperCAmelCase : Union[str, Any] = self.tokenizer.pad(__lowercase , **__lowercase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__UpperCAmelCase : Any = labels["""input_ids"""]
return input_features
def UpperCAmelCase ( self : Any , *__lowercase : Union[str, Any] , **__lowercase : Dict ) -> List[Any]:
return self.tokenizer.decode(*__lowercase , **__lowercase )
@contextmanager
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__UpperCAmelCase : Any = True
__UpperCAmelCase : Optional[int] = self.tokenizer
yield
__UpperCAmelCase : List[Any] = self.feature_extractor
__UpperCAmelCase : int = False
| 114
| 0
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE :Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :Union[str, Any] = XLMProphetNetTokenizer
UpperCamelCase_ :Any = False
UpperCamelCase_ :Any = True
def UpperCAmelCase_ ( self )-> Any:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase_ = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = "[PAD]"
UpperCamelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_lowercase ) , 1_012 )
def UpperCAmelCase_ ( self )-> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase )
UpperCamelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCamelCase_ = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def UpperCAmelCase_ ( self )-> Dict:
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = "Hello World!"
UpperCamelCase_ = [35_389, 6_672, 49, 2]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def UpperCAmelCase_ ( self )-> List[str]:
# fmt: off
UpperCamelCase_ = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 362
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE :Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :Optional[int] = XGLMTokenizer
UpperCamelCase_ :List[str] = XGLMTokenizerFast
UpperCamelCase_ :int = True
UpperCamelCase_ :Dict = True
def UpperCAmelCase_ ( self )-> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase_ = XGLMTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = "<pad>"
UpperCamelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(_lowercase ) , 1_008 )
def UpperCAmelCase_ ( self )-> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = XGLMTokenizer(_lowercase , keep_accents=_lowercase )
UpperCamelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCamelCase_ = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCAmelCase_ ( self )-> Optional[Any]:
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_lowercase , f.name )
UpperCamelCase_ = XGLMTokenizer(f.name , keep_accents=_lowercase )
UpperCamelCase_ = pickle.dumps(_lowercase )
pickle.loads(_lowercase )
def UpperCAmelCase_ ( self )-> str:
if not self.test_rust_tokenizer:
return
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = self.get_rust_tokenizer()
UpperCamelCase_ = "I was born in 92000, and this is falsé."
UpperCamelCase_ = tokenizer.tokenize(_lowercase )
UpperCamelCase_ = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
UpperCamelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
UpperCamelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
UpperCamelCase_ = self.get_rust_tokenizer()
UpperCamelCase_ = tokenizer.encode(_lowercase )
UpperCamelCase_ = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = "Hello World!"
UpperCamelCase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
UpperCamelCase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
# fmt: off
UpperCamelCase_ = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="facebook/xglm-564M" , padding=_lowercase , )
| 60
| 0
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
lowercase__ = TypeVar("""_T""")
class __lowerCamelCase ( Generic[_T] ):
'''simple docstring'''
def __init__( self : Optional[int] , a_ : Iterable[_T] | None = None ):
lowerCAmelCase_ : list[_T] = list(iterable or [] )
lowerCAmelCase_ : list[_T] = []
def __len__( self : str ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : List[Any] ):
return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def lowerCamelCase ( self : List[str] , a_ : _T ):
self._stacka.append(a_ )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : int = self._stacka.pop
lowerCAmelCase_ : Any = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
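# The class above is the classic two-stack queue: each element moves from the
# inbox stack to the outbox stack at most once, so dequeue is amortized O(1).
# A standalone sketch of the same idea with plain names (the original class
# and method names are elided above):
class TwoStackQueue:
    def __init__(self):
        self._inbox, self._outbox = [], []

    def put(self, item):
        self._inbox.append(item)

    def get(self):
        if not self._outbox:  # refill only when the outbox runs dry
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()

q = TwoStackQueue()
for x in (1, 2, 3):
    q.put(x)
assert [q.get(), q.get(), q.get()] == [1, 2, 3]  # FIFO order preserved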
| 241
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __lowerCamelCase ( __UpperCamelCase ) -> Dict:
"""simple docstring"""
return EnvironmentCommand()
def __lowerCamelCase ( __UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class __lowerCamelCase ( A__ ):
'''simple docstring'''
@staticmethod
def lowerCamelCase ( a_ : ArgumentParser ):
lowerCAmelCase_ : str = parser.add_parser("env" )
download_parser.set_defaults(func=a_ )
download_parser.add_argument(
"--accelerate-config_file" , default=a_ , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=a_ )
def __init__( self : Dict , a_ : Dict , *a_ : str ):
lowerCAmelCase_ : Union[str, Any] = accelerate_config_file
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Optional[int] = "not installed"
if is_safetensors_available():
import safetensors
lowerCAmelCase_ : int = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
lowerCAmelCase_ : Optional[Any] = f'''{safetensors.__version__} but is ignored because the installed PyTorch version is too old.'''
lowerCAmelCase_ : List[Any] = "not installed"
lowerCAmelCase_ : Dict = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
lowerCAmelCase_ : int = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(a_ ):
lowerCAmelCase_ : int = load_config_from_file(self._accelerate_config_file ).to_dict()
lowerCAmelCase_ : Any = (
"\n".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(a_ , a_ )
else f'''\t{accelerate_config}'''
)
lowerCAmelCase_ : Union[str, Any] = "not installed"
lowerCAmelCase_ : Dict = "NA"
if is_torch_available():
import torch
lowerCAmelCase_ : Tuple = torch.__version__
lowerCAmelCase_ : Union[str, Any] = torch.cuda.is_available()
lowerCAmelCase_ : List[str] = "not installed"
lowerCAmelCase_ : Tuple = "NA"
if is_tf_available():
import tensorflow as tf
lowerCAmelCase_ : Union[str, Any] = tf.__version__
try:
# deprecated in v2.1
lowerCAmelCase_ : Tuple = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
lowerCAmelCase_ : List[str] = bool(tf.config.list_physical_devices("GPU" ) )
lowerCAmelCase_ : Optional[Any] = "not installed"
lowerCAmelCase_ : Optional[int] = "not installed"
lowerCAmelCase_ : Tuple = "not installed"
lowerCAmelCase_ : Tuple = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
lowerCAmelCase_ : List[Any] = flax.__version__
lowerCAmelCase_ : Tuple = jax.__version__
lowerCAmelCase_ : List[Any] = jaxlib.__version__
lowerCAmelCase_ : str = jax.lib.xla_bridge.get_backend().platform
lowerCAmelCase_ : Dict = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": f'''{safetensors_version}''',
"Accelerate version": f'''{accelerate_version}''',
"Accelerate config": f'''{accelerate_config_str}''',
"PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
"Tensorflow version (GPU?)": f'''{tf_version} ({tf_cuda_available})''',
"Flax version (CPU?/GPU?/TPU?)": f'''{flax_version} ({jax_backend})''',
"Jax version": f'''{jax_version}''',
"JaxLib version": f'''{jaxlib_version}''',
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a_ ) )
return info
@staticmethod
def lowerCamelCase ( a_ : Tuple ):
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 241
| 1
|
"""simple docstring"""
import torch
from torch import nn
class lowerCamelCase (nn.Module ):
"""simple docstring"""
def __init__( self : Dict , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[int]=1 , __magic_name__ : int=False ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE_ = n_token
SCREAMING_SNAKE_CASE_ = d_embed
SCREAMING_SNAKE_CASE_ = d_proj
SCREAMING_SNAKE_CASE_ = cutoffs + [n_token]
SCREAMING_SNAKE_CASE_ = [0] + self.cutoffs
SCREAMING_SNAKE_CASE_ = div_val
SCREAMING_SNAKE_CASE_ = self.cutoffs[0]
SCREAMING_SNAKE_CASE_ = len(self.cutoffs ) - 1
SCREAMING_SNAKE_CASE_ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.zeros(self.n_clusters ) )
SCREAMING_SNAKE_CASE_ = nn.ModuleList()
SCREAMING_SNAKE_CASE_ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCamelCase , __lowerCamelCase ) ) )
else:
self.out_projs.append(__lowerCamelCase )
self.out_layers.append(nn.Linear(__lowerCamelCase , __lowerCamelCase ) )
else:
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCamelCase , __lowerCamelCase ) ) )
self.out_layers.append(nn.Linear(__lowerCamelCase , r_idx - l_idx ) )
SCREAMING_SNAKE_CASE_ = keep_order
def __A ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] ) -> Union[str, Any]:
if proj is None:
SCREAMING_SNAKE_CASE_ = nn.functional.linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
SCREAMING_SNAKE_CASE_ = nn.functional.linear(__lowerCamelCase , proj.t().contiguous() )
SCREAMING_SNAKE_CASE_ = nn.functional.linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __A ( self : Dict , __magic_name__ : List[str] , __magic_name__ : int=None , __magic_name__ : Optional[Any]=False ) -> int:
if labels is not None:
# Shift so that tokens < n predict n
SCREAMING_SNAKE_CASE_ = hidden[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE_ = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE_ = hidden.view(-1 , hidden.size(-1 ) )
SCREAMING_SNAKE_CASE_ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
SCREAMING_SNAKE_CASE_ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE_ = self._compute_logit(__lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
SCREAMING_SNAKE_CASE_ = labels != -100
SCREAMING_SNAKE_CASE_ = torch.zeros_like(__lowerCamelCase , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE_ = (
-nn.functional.log_softmax(__lowerCamelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(__lowerCamelCase , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE_ = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE_ = self.out_layers[i].weight
SCREAMING_SNAKE_CASE_ = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCamelCase )
biases.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE_ = self._compute_logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(__lowerCamelCase , dim=1 )
if labels is None:
SCREAMING_SNAKE_CASE_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
SCREAMING_SNAKE_CASE_ = torch.zeros_like(__lowerCamelCase , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = [0] + self.cutoffs
for i in range(len(__lowerCamelCase ) - 1 ):
SCREAMING_SNAKE_CASE_ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
SCREAMING_SNAKE_CASE_ = (labels >= l_idx) & (labels < r_idx)
SCREAMING_SNAKE_CASE_ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
SCREAMING_SNAKE_CASE_ = labels.index_select(0 , __lowerCamelCase ) - l_idx
SCREAMING_SNAKE_CASE_ = head_logprob.index_select(0 , __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = hidden.index_select(0 , __lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ = hidden
if i == 0:
if labels is not None:
SCREAMING_SNAKE_CASE_ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE_ = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE_ = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE_ = self._compute_logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(__lowerCamelCase , dim=1 )
SCREAMING_SNAKE_CASE_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
SCREAMING_SNAKE_CASE_ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE_ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
SCREAMING_SNAKE_CASE_ = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCamelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __A ( self : List[Any] , __magic_name__ : int ) -> List[Any]:
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE_ = self._compute_logit(__lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCamelCase , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE_ = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE_ = self.out_layers[i].weight
SCREAMING_SNAKE_CASE_ = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCamelCase )
biases.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE_ = self._compute_logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(__lowerCamelCase , dim=1 )
SCREAMING_SNAKE_CASE_ = [0] + self.cutoffs
for i in range(len(__lowerCamelCase ) - 1 ):
SCREAMING_SNAKE_CASE_ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
SCREAMING_SNAKE_CASE_ = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE_ = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE_ = self._compute_logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(__lowerCamelCase , dim=1 )
SCREAMING_SNAKE_CASE_ = head_logprob[:, -i] + tail_logprob_i
SCREAMING_SNAKE_CASE_ = logprob_i
return out
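# The module above is the projected adaptive log-softmax (Grave et al., 2017):
# frequent tokens sit in a head shortlist, rare tokens in tail clusters, and a
# tail token's log-probability is the head's cluster log-probability plus the
# within-cluster log-probability. A toy numeric sketch of that composition
# (standalone; shapes chosen for illustration only):
import torch

torch.manual_seed(0)
head_logits = torch.randn(1, 5)  # 3 shortlist tokens + 2 cluster slots
tail_logits = torch.randn(1, 4)  # 4 tokens inside the first tail cluster

head_logprob = torch.log_softmax(head_logits, dim=-1)
tail_logprob = torch.log_softmax(tail_logits, dim=-1)

# Full log-prob of tail token j: log p(cluster 1) + log p(j | cluster 1)
full = head_logprob[:, 3, None] + tail_logprob
# Shortlist mass plus this cluster's mass stays below 1; the remainder
# belongs to the second cluster slot.
total = head_logprob[:, :3].exp().sum() + full.exp().sum()
assert total <= 1.0 + 1e-6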
| 356
|
import torch
def a__ ( ):
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE_ = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 305
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Tuple = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mobilenet_v2"
def __init__(self , A=3 , A=2_2_4 , A=1.0 , A=8 , A=8 , A=6 , A=3_2 , A=True , A=True , A="relu6" , A=True , A=0.8 , A=0.02 , A=0.0_01 , A=2_5_5 , **A , ):
super().__init__(**A )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
lowerCamelCase_ : Optional[int] = num_channels
lowerCamelCase_ : Tuple = image_size
lowerCamelCase_ : Dict = depth_multiplier
lowerCamelCase_ : Optional[Any] = depth_divisible_by
lowerCamelCase_ : Dict = min_depth
lowerCamelCase_ : Optional[Any] = expand_ratio
lowerCamelCase_ : Tuple = output_stride
lowerCamelCase_ : Dict = first_layer_is_expansion
lowerCamelCase_ : Union[str, Any] = finegrained_output
lowerCamelCase_ : Optional[Any] = hidden_act
lowerCamelCase_ : Tuple = tf_padding
lowerCamelCase_ : List[str] = classifier_dropout_prob
lowerCamelCase_ : List[Any] = initializer_range
lowerCamelCase_ : List[Any] = layer_norm_eps
lowerCamelCase_ : Dict = semantic_loss_ignore_index
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = version.parse("1.11" )
@property
def UpperCAmelCase__ (self ):
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def UpperCAmelCase__ (self ):
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def UpperCAmelCase__ (self ):
return 1E-4
| 318
|
"""
Recursive solution to the longest non-decreasing subsequence problem: given an
array, find a longest sub-sequence whose elements never decrease and return it.
"""
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
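    # Worked example (added, not in the original file). The recursion keeps the
    # pivot 10, recurses on the tail starting at the first smaller element, and
    # compares candidate subsequences by length:
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))
    # -> [10, 22, 33, 41, 60, 80]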
| 318
| 1
|
"""
Project Euler Problem 107: https://projecteuler.net/problem=107

Find the maximum saving that can be achieved by removing redundant edges from a
network while keeping it connected, i.e. the total weight of the network minus
the weight of its minimum spanning tree (found here with Prim's algorithm).
"""
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """A class representing an undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
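    # Tiny illustrative check (added; not part of the original solution). On a
    # triangle graph, Prim's algorithm keeps the two lightest edges, so the
    # achievable saving equals the weight of the heaviest edge:
    #     demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    #     mst = demo.prims_algorithm()
    #     sum(demo.edges.values()) - sum(mst.edges.values())  # == 3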
| 350
|
"""Binarize a grayscale image using its mean pixel value as the threshold."""
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """
    image: is a grayscale PIL image object
    """
    # note: the names below are swapped relative to PIL's (width, height) size
    # tuple, but the index pairs in the loops compensate, so every pixel is
    # visited exactly once
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
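    # Note (added): mean_threshold binarizes the image in place and returns the
    # same PIL Image object, so saving the return value saves the thresholded input.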
| 243
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
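# Example invocation (added; the script file name is an assumption — use
# whatever name this file is saved under):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16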
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 337
|
"""
Implementation of the ReLU activation function with NumPy: negative entries are
clipped to 0, non-negative entries pass through unchanged.
"""
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply ReLU element-wise to `vector`."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
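    # Added: np.maximum broadcasts, so relu accepts any array-like input.
    print(relu(np.linspace(-2, 2, 5)))  # --> [0. 0. 0. 1. 2.]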
| 26
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
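# Illustration (added): like any PretrainedConfig subclass, VanConfig can be
# round-tripped through a plain dict, e.g.
#     config = VanConfig(depths=[2, 2, 4, 2])
#     restored = VanConfig.from_dict(config.to_dict())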
| 49
|
from __future__ import annotations


class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """A full binary tree: every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # Build a sample tree with nine nodes.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
| 49
| 1
|
"""Convert a fairseq M2M100 checkpoint to the Hugging Face format."""
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
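# Example (added; the script name and paths are placeholders):
#   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/model.pt ./m2m100-converted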
| 35
|
"""
Find the side length of the largest square of 1s inside a binary matrix, solved
with four approaches: plain recursion, memoized recursion, bottom-up DP, and
bottom-up DP with O(cols) extra space.
"""


def largest_square_area_in_matrix_top_down_approach(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Plain recursion over all (row, col) sub-problems."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Memoized recursion: caches each sub-problem in dp_array."""

    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]
    ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP keeping only the current and next rows."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: next_row must keep the finished row's values while
        # current_row is overwritten on the next iteration
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
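    # Added cross-check (not in the original file): the space-optimized variant
    # matches the plain bottom-up answer on a grid whose largest square has side 2.
    demo = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_bottom_up(3, 3, demo)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, demo)
        == 2
    )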
| 198
| 0
|
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 357
|
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 87
| 0
|
"""simple docstring"""
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowercase="sayef/fsner-bert-base-uncased" ):
super(lowercase , self ).__init__()
_lowerCamelCase : Optional[int] = AutoModel.from_pretrained(lowercase , return_dict=lowercase )
_lowerCamelCase : str = torch.nn.CosineSimilarity(3 , 1E-08 )
_lowerCamelCase : Optional[int] = torch.nn.Softmax(dim=1 )
def A_ ( self , **lowercase ):
return self.bert(**lowercase ).last_hidden_state
def A_ ( self , lowercase ):
return token_embeddings.sum(2 , keepdim=lowercase )
def A_ ( self , lowercase , lowercase , lowercase=1 ):
return self.softmax(T * self.cos(lowercase , lowercase ) )
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : List[str] = W_supports['sizes'].tolist()
_lowerCamelCase : List[Any] = W_supports['start_token_id'].item()
_lowerCamelCase : Optional[Any] = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_lowerCamelCase : Union[str, Any] = self.BERT(**lowercase )
_lowerCamelCase : Any = self.BERT(**lowercase )
_lowerCamelCase : str = None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Optional[Any] = W_supports['input_ids'] == start_token_id
_lowerCamelCase : Optional[int] = W_supports['input_ids'] == end_token_id
for i, size in enumerate(lowercase ):
if i == 0:
_lowerCamelCase : str = 0
else:
_lowerCamelCase : List[Any] = support_sizes[i - 1]
_lowerCamelCase : str = S[s : s + size][start_token_masks[s : s + size]]
_lowerCamelCase : List[Any] = S[s : s + size][end_token_masks[s : s + size]]
_lowerCamelCase : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_lowerCamelCase : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_lowerCamelCase : Optional[Any] = torch.vstack((p_starts, p_start) )
_lowerCamelCase : Dict = torch.vstack((p_ends, p_end) )
else:
_lowerCamelCase : Optional[Any] = p_start
_lowerCamelCase : Optional[int] = p_end
return p_starts, p_ends
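# Usage sketch (added; variable names are hypothetical). Queries and supports
# must be pre-tokenized so that W_supports carries `sizes`, `start_token_id`
# and `end_token_id` — the published `fsner` package provides a tokenizer
# utility for this:
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_query, W_supports)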
| 96
|
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase__ = get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = """dummy_data"""
lowerCamelCase__ = """datasets"""
lowerCamelCase__ = False
def __init__( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , lowercase = True , lowercase = None , ):
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Dict = dataset_name
_lowerCamelCase : Union[str, Any] = cache_dir
_lowerCamelCase : Dict = use_local_dummy_data
_lowerCamelCase : Tuple = config
# download_callbacks take a single url as input
_lowerCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase : str = str(lowercase )
# to be downloaded
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : int = None
@property
def A_ ( self ):
if self._dummy_file is None:
_lowerCamelCase : Tuple = self.download_dummy_data()
return self._dummy_file
@property
def A_ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def A_ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def A_ ( self ):
_lowerCamelCase : List[str] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase : int = cached_path(
lowercase , cache_dir=self.cache_dir , extract_compressed_file=lowercase , force_extract=lowercase )
return os.path.join(lowercase , self.dummy_file_name )
@property
def A_ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def A_ ( self ):
if self._bucket_url is None:
_lowerCamelCase : List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def A_ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def A_ ( self , lowercase , *lowercase ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Union[str, Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowercase , lowercase ):
return self.create_dummy_data_dict(lowercase , lowercase )
elif isinstance(lowercase , (list, tuple) ):
return self.create_dummy_data_list(lowercase , lowercase )
else:
return self.create_dummy_data_single(lowercase , lowercase )
def A_ ( self , lowercase , *lowercase ):
return self.download_and_extract(lowercase )
def A_ ( self , lowercase , lowercase ):
return self.download_and_extract(lowercase )
def A_ ( self , lowercase , *lowercase , **lowercase ):
return path
def A_ ( self ):
return {}
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowercase , lowercase ):
for single_url in single_urls:
download_callback(lowercase )
else:
_lowerCamelCase : List[Any] = single_urls
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowercase , lowercase ):
_lowerCamelCase : List[Any] = [os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Optional[int] = single_urls
_lowerCamelCase : List[Any] = os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) )
_lowerCamelCase : int = value
# make sure that values are unique
if all(isinstance(lowercase , lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase : List[str] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , lowercase ) ) for url in data_url )
_lowerCamelCase : int = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase : List[str] = [data_url[0]] * len(lowercase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : str = os.path.join(lowercase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(lowercase )
return dummy_data_list
def A_ ( self , lowercase , lowercase ):
for download_callback in self.download_callbacks:
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : Tuple = os.path.join(lowercase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(lowercase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def A_ ( self ):
pass
def A_ ( self ):
pass
def A_ ( self , lowercase ):
def _iter_archive_members(lowercase ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase : str = Path(self.dummy_file ).parent
_lowerCamelCase : Union[str, Any] = path.relative_to(lowercase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase : List[str] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowercase )
_lowerCamelCase : Optional[int] = Path(lowercase )
_lowerCamelCase : Dict = _iter_archive_members(lowercase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(lowercase ).as_posix(), file_path.open('rb' )
def A_ ( self , lowercase ):
if not isinstance(lowercase , lowercase ):
_lowerCamelCase : List[str] = [paths]
for path in paths:
if os.path.isfile(lowercase ):
if os.path.basename(lowercase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowercase ):
if os.path.basename(lowercase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(lowercase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(lowercase , lowercase )
| 96
| 1
|
"""simple docstring"""
def snake_case ( ):
return [list(range(10_00 - i ,-10_00 - i ,-1 ) ) for i in range(10_00 )]
lowerCamelCase_ = generate_large_matrix()
lowerCamelCase_ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def snake_case ( A__ ):
assert all(row == sorted(_a ,reverse=_a ) for row in grid )
assert all(list(_a ) == sorted(_a ,reverse=_a ) for col in zip(*_a ) )
def snake_case ( A__ ):
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : List[str] = len(_a ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCAmelCase_ : str = (left + right) // 2
UpperCAmelCase_ : Any = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCAmelCase_ : Optional[int] = mid + 1
else:
UpperCAmelCase_ : Dict = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_a )
def snake_case ( A__ ):
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Optional[Any] = len(grid[0] )
for i in range(len(_a ) ):
UpperCAmelCase_ : Tuple = find_negative_index(grid[i][:bound] )
total += bound
return (len(_a ) * len(grid[0] )) - total
def snake_case ( A__ ):
return len([number for row in grid for number in row if number < 0] )
def snake_case ( A__ ):
UpperCAmelCase_ : List[Any] = 0
for row in grid:
for i, number in enumerate(_a ):
if number < 0:
total += len(_a ) - i
break
return total
def snake_case ( ):
from timeit import timeit
print("Running benchmarks" )
UpperCAmelCase_ : Any = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCAmelCase_ : Any = timeit(F"""{func}(grid=grid)""" ,setup=_a ,number=5_00 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
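    # Added sanity check (not part of the original file): the three counting
    # implementations must agree on the small test grids.
    for test_grid in test_grids[:4]:
        assert (
            count_negatives_binary_search(test_grid)
            == count_negatives_brute_force(test_grid)
            == count_negatives_brute_force_with_break(test_grid)
        )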
| 361
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''table-transformer'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[Any] , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Optional[Any]=100 , lowerCAmelCase_ : Optional[int]=6 , lowerCAmelCase_ : List[Any]=2_048 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : List[Any]=2_048 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[int]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=0.0_2 , lowerCAmelCase_ : Any=1.0 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Dict="sine" , lowerCAmelCase_ : Optional[Any]="resnet50" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : List[Any]=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> Union[str, Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = backbone_config.get("model_type" )
UpperCAmelCase_ : str = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = None, None, None
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : int = backbone_config
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : Optional[Any] = num_queries
UpperCAmelCase_ : List[str] = d_model
UpperCAmelCase_ : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase_ : Optional[Any] = encoder_layers
UpperCAmelCase_ : List[str] = encoder_attention_heads
UpperCAmelCase_ : int = decoder_ffn_dim
UpperCAmelCase_ : int = decoder_layers
UpperCAmelCase_ : Optional[int] = decoder_attention_heads
UpperCAmelCase_ : List[str] = dropout
UpperCAmelCase_ : Dict = attention_dropout
UpperCAmelCase_ : Union[str, Any] = activation_dropout
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : int = init_std
UpperCAmelCase_ : Any = init_xavier_std
UpperCAmelCase_ : Union[str, Any] = encoder_layerdrop
UpperCAmelCase_ : Dict = decoder_layerdrop
UpperCAmelCase_ : Union[str, Any] = encoder_layers
UpperCAmelCase_ : Any = auxiliary_loss
UpperCAmelCase_ : List[str] = position_embedding_type
UpperCAmelCase_ : Dict = backbone
UpperCAmelCase_ : Optional[int] = use_pretrained_backbone
UpperCAmelCase_ : Tuple = dilation
# Hungarian matcher
UpperCAmelCase_ : Optional[Any] = class_cost
UpperCAmelCase_ : List[Any] = bbox_cost
UpperCAmelCase_ : Optional[int] = giou_cost
# Loss coefficients
UpperCAmelCase_ : Optional[int] = mask_loss_coefficient
UpperCAmelCase_ : List[str] = dice_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = bbox_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient
UpperCAmelCase_ : Dict = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.d_model
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float:
return 1e-5
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 12
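# Illustration (added): the config is exposed through transformers, e.g.
#     from transformers import TableTransformerConfig
#     config = TableTransformerConfig()
#     print(config.hidden_size)  # 256 — aliased to d_model via attribute_map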
| 253
| 0
|