import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """Constructs a RemBERT tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # Drop the SentencePiece processor so the tokenizer can be pickled;
        # it is re-created in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
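
# Minimal usage sketch (not part of the original module; hedged). Because this
# file uses package-relative imports it is normally loaded through the
# transformers library rather than run directly:
#
#     from transformers import RemBertTokenizer
#     tok = RemBertTokenizer.from_pretrained("google/rembert")
#     ids = tok("Hello world")["input_ids"]   # [CLS] ... [SEP] added via the methods above
#     tok.save_vocabulary("./rembert-vocab")  # copies sentencepiece.model to that directory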
import os
import posixpath
import shutil  # needed for the working-dir move in _prepare_split_single
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets backed by a Spark DataFrame."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        # Shuffling permutes whole Spark partitions; row order within a partition is preserved.
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
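
# Minimal usage sketch (not part of the original module; hedged). This builder
# is what backs `Dataset.from_spark` in recent versions of the datasets library:
#
#     import datasets
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = datasets.Dataset.from_spark(df)           # materializes through this builder
#     it = datasets.IterableDataset.from_spark(df)   # streams through SparkExamplesIterable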
def mf_knapsack(i, wt, val, j):
    """
    Memory-function (top-down) variant: only the subproblems that are actually
    needed get computed, using the global table f initialized with -1s.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                # Either take item i (value plus best of remaining capacity) or skip it.
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp  # w_ == w after the loop


def knapsack_with_example_solution(w, wt, val):
    """
    Solves the 0/1 knapsack problem and also reconstructs one of the
    (possibly several) optimal subsets of item indices.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i - 1, j),
    # i.e. considering only the previous items at the same maximum weight.
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
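
# Worked check of the asserted optimum: items 3 and 4 weigh 2 + 3 = 5 <= 6 and
# are worth 4 + 4 = 8; the next-best subset within capacity, items 1 and 3
# (weight 4 + 2 = 6), is only worth 3 + 4 = 7, so 8 is indeed optimal.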
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Once the frontiers' combined cost reaches the best meeting point, no
        # shorter path can still be found, so stop.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
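
# Worked example, consistent with the sample graphs above: the shortest E -> F
# distance is 3, via E -> G -> F (2 + 1); the alternative E -> B -> C -> D -> F
# costs 1 + 1 + 1 + 1 = 4.
#
#     >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
#     3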
def join(separator: str, separated: list) -> str:
    """Join a list of strings with the given separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator

    # Remove the trailing separator appended by the final loop iteration.
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
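
# Expected behavior, checked by hand against the implementation above
# (strip() removes the one trailing separator appended in the loop):
#
#     >>> join("", ["a", "b", "c", "d"])
#     'abcd'
#     >>> join("#", ["a", "b", "c", "d"])
#     'a#b#c#d'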
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CTRL model."""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
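
# Minimal usage sketch (not part of the original module; hedged):
#
#     from transformers import CTRLConfig
#     config = CTRLConfig(n_layer=12)
#     config.num_hidden_layers   # -> 12, resolved through attribute_map above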
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
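
# Note (my reading, not from the original file): `get_activation` maps a string
# name to an nn.Module -- "swish" and "silu" both resolve to nn.SiLU
# (x * sigmoid(x)), "mish" to nn.Mish, "gelu" to nn.GELU -- which is what the
# isinstance checks assert; the numeric checks rely on these activations
# saturating to 0 for large negative inputs and to the identity at x = 20.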
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
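
# Invocation sketch (hedged; the flag names mirror the dataclass fields above
# plus standard TrainingArguments, while the model name and file paths are
# placeholders, not part of the original script):
#
#     python run_mlm_wwm.py \
#         --model_name_or_path bert-base-chinese \
#         --train_file path/to/train.txt \
#         --validation_file path/to/valid.txt \
#         --train_ref_file path/to/train_refs.json \
#         --do_train --do_eval \
#         --output_dir /tmp/test-mlm-wwm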
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # The padded spans of the original flat literal are written as list
        # arithmetic here; the values are unchanged.
        expected_encoding = {
            "input_ids": [
                [43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0],
                [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0] + [58100] * 71,
                [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0] + [58100] * 90,
            ],
            "attention_mask": [
                [1] * 102,
                [1] * 31 + [0] * 71,
                [1] * 12 + [0] * 90,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
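
# Minimal usage sketch mirroring the integration tests above (hedged; requires
# network access to the Hugging Face Hub):
#
#     from transformers import MarianTokenizer
#     tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     batch = tok(["I am a small frog"], return_tensors="pt")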
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling tailored to variance-expanding (VE) models (Karras et al.)."""

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, key, sample, sigma):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
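
# Minimal usage sketch (not part of the original module; hedged -- this class is
# exposed through diffusers as FlaxKarrasVeScheduler):
#
#     scheduler = FlaxKarrasVeScheduler()
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     # per denoising step:
#     #   sample_hat, sigma_hat = scheduler.add_noise_to_input(state, rng_key, sample, sigma)
#     #   out = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)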
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    """Constructs a VideoMAE image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
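
# Minimal usage sketch (not part of the original module; hedged -- the output
# shape assumes the default 224x224 center crop and ChannelDimension.FIRST):
#
#     import numpy as np
#     from transformers import VideoMAEImageProcessor
#     processor = VideoMAEImageProcessor()
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
#     inputs = processor(video, return_tensors="np")
#     inputs["pixel_values"].shape   # (1, 16, 3, 224, 224)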
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__UpperCAmelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : List[str] = frozenset([] )
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
        lowerCamelCase_ : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
lowerCamelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCamelCase_ : Dict = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Tuple = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Any = Image.fromarray(np.uint8(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : int = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Optional[int] = Image.fromarray(np.uint8(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Union[str, Any] = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self ):
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : int = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a_ , a_ , a_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
lowerCamelCase_ : int = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
self.assertLess(a_ , 1E-4 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
lowerCamelCase_ : int = pipe.generate_mask(**a_ )
lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase_ : List[str] = np.array([0] * 9 )
lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
lowerCamelCase_ : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Dict = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
def _UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : str = pipe.invert(**a_ ).images
lowerCamelCase_ : int = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Union[str, Any] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _UpperCamelCase ( cls ):
lowerCamelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
lowerCamelCase_ : List[Any] = raw_image
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = torch.manual_seed(0 )
lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : str = "a bowl of fruit"
lowerCamelCase_ : Optional[int] = "a bowl of pears"
lowerCamelCase_ : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
lowerCamelCase_ : List[str] = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = "a bowl of fruit"
lowerCamelCase_ : Dict = "a bowl of pears"
lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
lowerCamelCase_ : Any = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''', '''False''' ) ) is not True, reason='''Skipping test because should only be run when releasing minor transformers version''', )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=a_ , )
assert hasattr(self , "env" )
def _UpperCamelCase ( self , a_ ):
# configuration for running training on smdistributed Model Parallel
lowerCamelCase_ : Any = {
"enabled": True,
"processes_per_host": 8,
}
lowerCamelCase_ : Any = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCamelCase_ : List[str] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCamelCase_ : Tuple = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=a_ , instance_type=self.instance_type , debugger_hook_config=a_ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=a_ , py_version="py36" , )
def _UpperCamelCase ( self , a_ ):
TrainingJobAnalytics(a_ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _UpperCamelCase ( self , a_ ):
# create estimator
lowerCamelCase_ : Dict = self.create_estimator(a_ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase_ : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase_ : Any = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCamelCase_ : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase_ : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , a_ )
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = ["a", "b", "c"]
# Defaults to last layer if both are None
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = BackboneMixin()
lowerCamelCase_ : List[Any] = ["a", "b", "c"]
lowerCamelCase_ : Optional[int] = ["a", "c"]
lowerCamelCase_ : Dict = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowerCamelCase_ : Union[str, Any] = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowerCamelCase_ : str = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
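# A rough re-implementation of the alignment rule the tests above exercise,
# assuming the semantics they assert (default to the last stage; derive the
# missing side from the other). This is a sketch, not the library's helper.
def _align_features_indices(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return out_features, [stage_names.index(f) for f in out_features]
    return out_features, out_indices

assert _align_features_indices(None, None, ["a", "b", "c"]) == (["c"], [2])
assert _align_features_indices(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])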
| 73 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCAmelCase__ :
"""simple docstring"""
def _UpperCamelCase ( self , a_ , a_ , a_ ):
return None
class lowerCAmelCase__ :
"""simple docstring"""
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
return None
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCamelCase ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase__ , "tf" , 12 , **lowercase__ )
@require_torch
@slow
def _UpperCamelCase ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase__ , "pt" , 12 , **lowercase__ )
@require_torch
@slow
def _UpperCamelCase ( self ):
from transformers import BertModel
lowerCamelCase_ : Tuple = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(lowercase__ ) )
vocab_file.flush()
lowerCamelCase_ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase_ : Dict = BertModel(BertConfig(vocab_size=len(lowercase__ ) ) )
model.save_pretrained(lowercase__ )
self._test_export(lowercase__ , "pt" , 12 , lowercase__ )
@require_tf
@slow
def _UpperCamelCase ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase_ : Optional[int] = self._test_export(lowercase__ , "tf" , 12 , **lowercase__ )
lowerCamelCase_ : Dict = quantize(Path(lowercase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase__ ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def _UpperCamelCase ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase_ : List[Any] = self._test_export(lowercase__ , "pt" , 12 , **lowercase__ )
lowerCamelCase_ : List[str] = quantize(lowercase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase__ ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_=None , **a_ ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase_ : Union[str, Any] = Path(lowercase__ ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ )
return path
except Exception as e:
self.fail(lowercase__ )
@require_torch
@require_tokenizers
@slow
def _UpperCamelCase ( self ):
from transformers import BertModel
lowerCamelCase_ : List[Any] = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowerCamelCase_ : Any = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowercase__ , lowercase__ , "pt" )
@require_tf
@require_tokenizers
@slow
def _UpperCamelCase ( self ):
from transformers import TFBertModel
lowerCamelCase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowerCamelCase_ : Dict = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowercase__ , lowercase__ , "tf" )
def _UpperCamelCase ( self , a_ , a_ , a_ ):
lowerCamelCase_ : str = FeatureExtractionPipeline(lowercase__ , lowercase__ )
lowerCamelCase_ : Optional[int] = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
lowerCamelCase_ : Any = infer_shapes(lowercase__ , lowercase__ )
# Assert all variables are present
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowercase__ )
self.assertSequenceEqual(variable_names[3:] , lowercase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = ["input_ids", "attention_mask", "token_type_ids"]
lowerCamelCase_ : Dict = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
lowerCamelCase_ : Optional[int] = ensure_valid_input(FuncContiguousArgs() , lowercase__ , lowercase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowercase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowercase__ ) , set(lowercase__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowercase__ , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase_ : Tuple = ensure_valid_input(FuncNonContiguousArgs() , lowercase__ , lowercase__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowercase__ ) , 1 )
self.assertEqual(len(lowercase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
__UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
__UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCAmelCase : int = '''default_config.yaml'''
__UpperCAmelCase : Tuple = config_folder / config_file
__UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
__UpperCAmelCase : int = Path('''tests/test_configs''' )
@classmethod
def _UpperCamelCase ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _UpperCamelCase ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=a_ ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''test-tpu'''
__UpperCAmelCase : Tuple = '''us-central1-a'''
__UpperCAmelCase : Tuple = '''ls'''
__UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
__UpperCAmelCase : Dict = '''cd /usr/share'''
__UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
__UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
| 73 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ['''audio_values''', '''audio_mask''']
def __init__( self , a_=2048 , a_=1 , a_=[16, 16] , a_=128 , a_=4_4100 , a_=86 , a_=2048 , a_=0.0 , **a_ , ):
super().__init__(
feature_size=a_ , sampling_rate=a_ , padding_value=a_ , **a_ , )
lowerCamelCase_ : Optional[Any] = spectrogram_length
lowerCamelCase_ : str = num_channels
lowerCamelCase_ : Tuple = patch_size
lowerCamelCase_ : Optional[int] = feature_size // self.patch_size[1]
lowerCamelCase_ : Union[str, Any] = n_fft
lowerCamelCase_ : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
lowerCamelCase_ : int = sampling_rate
lowerCamelCase_ : Union[str, Any] = padding_value
lowerCamelCase_ : Dict = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=a_ , norm="slaney" , mel_scale="slaney" , ).T
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[int] = spectrogram(
a_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
lowerCamelCase_ : Any = log_spec[:, :-1]
lowerCamelCase_ : Dict = log_spec - 20.0
lowerCamelCase_ : Tuple = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , a_ , a_ = None , a_ = True , a_ = None , a_ = False , a_ = False , **a_ , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCamelCase_ : Dict = isinstance(a_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowerCamelCase_ : List[Any] = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            lowerCamelCase_ : int = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(a_ , np.ndarray ):
            lowerCamelCase_ : Optional[Any] = np.asarray(a_ , dtype=np.float32 )
        elif isinstance(a_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            lowerCamelCase_ : str = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
lowerCamelCase_ : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase_ : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a_ ):
            lowerCamelCase_ : Optional[Any] = [np.asarray(a_ , dtype=np.float32 ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase_ : Optional[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase_ : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
            lowerCamelCase_ : List[Any] = np.array(a_ ).astype(np.float32 )
# convert into correct format for padding
lowerCamelCase_ : int = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        lowerCamelCase_ : Dict = np.ones([len(a_ ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
lowerCamelCase_ : Optional[Any] = padded_audio_features * self.padding_value
for i in range(len(a_ ) ):
lowerCamelCase_ : Tuple = audio_features[i]
lowerCamelCase_ : List[str] = feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase_ : Tuple = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowerCamelCase_ : Any = {'audio_values': padded_audio_features}
lowerCamelCase_ : Any = BatchFeature(data=a_ , tensor_type=a_ )
return encoded_inputs
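# A worked sketch of the attention-mask arithmetic above, assuming the
# defaults patch_size=(16, 16) and feature_size=128, so freq_len = 128 // 16
# = 8: each ceil(frames / 16) group of time frames contributes 8 patches.
_patch_time, _freq_len = 16, 128 // 16
_num_frames = 100                                         # time frames in one clip
_n_patches = ceil(_num_frames / _patch_time) * _freq_len  # 7 * 8 = 56
_max_patch_len = 80                                       # batch-wide maximum
_audio_mask = [1] * _n_patches + [0] * (_max_patch_len - _n_patches)
assert (_n_patches, len(_audio_mask)) == (56, 80)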
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , a_ , a_ , a_ ):
super().__init__()
self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ):
lowerCamelCase_ : Optional[Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
lowerCamelCase_ : Optional[int] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCamelCase_ : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : Optional[int] = {}
if accepts_eta:
lowerCamelCase_ : Optional[int] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCamelCase_ : Dict = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
lowerCamelCase_ : Optional[Any] = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : List[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# decode the image latents with the VAE
lowerCamelCase_ : str = self.vqvae.decode(a_ ).sample
lowerCamelCase_ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ : Optional[Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
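# The `accepts_eta` introspection trick above, in isolation: `eta` is only
# forwarded to schedulers whose `step` signature actually declares it. The
# stand-in step function below is illustrative.
def _step(noise_pred, t, latents, eta=0.0):
    return latents

_accepts_eta = "eta" in set(inspect.signature(_step).parameters.keys())
_extra_kwargs = {"eta": 0.0} if _accepts_eta else {}
assert _accepts_eta and _extra_kwargs == {"eta": 0.0}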
| 73 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCAmelCase__ ( __A ):
"""simple docstring"""
__UpperCAmelCase : str = '''mobilenet_v1'''
def __init__( self , a_=3 , a_=224 , a_=1.0 , a_=8 , a_="relu6" , a_=True , a_=0.9_99 , a_=0.02 , a_=0.0_01 , **a_ , ):
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
lowerCamelCase_ : Any = num_channels
lowerCamelCase_ : Any = image_size
lowerCamelCase_ : str = depth_multiplier
lowerCamelCase_ : Dict = min_depth
lowerCamelCase_ : Union[str, Any] = hidden_act
lowerCamelCase_ : int = tf_padding
lowerCamelCase_ : Dict = classifier_dropout_prob
lowerCamelCase_ : Optional[int] = initializer_range
lowerCamelCase_ : str = layer_norm_eps
class lowerCAmelCase__ ( __A ):
"""simple docstring"""
__UpperCAmelCase : Tuple = version.parse('''1.11''' )
@property
def _UpperCamelCase ( self ):
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _UpperCamelCase ( self ):
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _UpperCamelCase ( self ):
return 1E-4
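# A small sketch of what depth_multiplier does in MobileNetV1-style models
# (a simplification of the usual rounding rule, which also snaps channel
# counts to multiples of 8): channels scale and are floored at min_depth,
# which is why the config above rejects depth_multiplier <= 0.
def _scaled_channels(channels, depth_multiplier, min_depth=8):
    return max(min_depth, int(channels * depth_multiplier))

assert _scaled_channels(512, 1.0) == 512
assert _scaled_channels(512, 0.25) == 128
assert _scaled_channels(16, 0.25) == 8  # floored at min_depth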
| 704 |
import re
def __magic_name__ ( dna):
    '''simple docstring'''
    if len(re.findall("[ATCG]" , dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG" , "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
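    # A couple of direct sanity checks for the complement function above
    # (A<->T, C<->G):
    assert __magic_name__("ATCG") == "TAGC"
    assert __magic_name__("GGTCA") == "CCAGT"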
| 73 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCAmelCase__ ( __lowercase ):
"""simple docstring"""
def __init__( self , a_ = None , a_ = None , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , **a_ , ):
lowerCamelCase_ : Optional[Any] = path_or_paths
lowerCamelCase_ : Optional[int] = split if split or isinstance(_A , _A ) else "train"
lowerCamelCase_ : Optional[int] = features
lowerCamelCase_ : Tuple = cache_dir
lowerCamelCase_ : str = keep_in_memory
lowerCamelCase_ : int = streaming
lowerCamelCase_ : Tuple = num_proc
lowerCamelCase_ : Tuple = kwargs
@abstractmethod
def _UpperCamelCase ( self ):
pass
class lowerCAmelCase__ ( __lowercase ):
"""simple docstring"""
def __init__( self , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , **a_ , ):
lowerCamelCase_ : Dict = features
lowerCamelCase_ : Union[str, Any] = cache_dir
lowerCamelCase_ : Any = keep_in_memory
lowerCamelCase_ : Dict = streaming
lowerCamelCase_ : List[Any] = num_proc
lowerCamelCase_ : List[Any] = kwargs
@abstractmethod
def _UpperCamelCase ( self ):
pass
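# A self-contained sketch of the reader pattern above: the base class stores
# the common options and leaves the read step abstract; a concrete reader
# then only implements loading. Names here are illustrative.
class _BaseReader(ABC):
    def __init__(self, path, split=None, streaming=False, **kwargs):
        self.path = path
        self.split = split if split else "train"
        self.streaming = streaming
        self.kwargs = kwargs

    @abstractmethod
    def read(self):
        ...

class _LinesReader(_BaseReader):
    def read(self):
        # materialize the file as a list of lines
        with open(self.path) as f:
            return [line.rstrip("\n") for line in f]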
| 705 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude , angle , radian_mode = False):
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces , location , eps = 10**-1):
    '''simple docstring'''
    lowerCamelCase_ : NDArray[float64] = cross(location , forces)
    lowerCamelCase_ : float = sum(lowerCamelCase_)
    return abs(lowerCamelCase_) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(7_18.4, 1_8_0 - 3_0),
polar_force(8_79.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    location = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
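    # One more worked check (sketch): equal and opposite forces offset along
    # the x-axis produce a net moment of -500, so the system is correctly
    # reported as unbalanced.
    forces = array([[0, 1_0_0], [0, -1_0_0]])
    location = array([[0, 0], [5, 0]])
    assert not in_static_equilibrium(forces, location)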
| 73 | 0 |
import os
import numpy
import onnx
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = a.name
lowerCamelCase_ : Optional[Any] = b.name
lowerCamelCase_ : Tuple = ""
lowerCamelCase_ : Union[str, Any] = ""
lowerCamelCase_ : Dict = a == b
lowerCamelCase_ : str = name_a
lowerCamelCase_ : Tuple = name_b
return res
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input):
if input_name == name:
node_proto.input.insert(lowerCAmelCase__ , lowerCAmelCase__)
node_proto.input.pop(i + 1)
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase__ , lowerCAmelCase__)
_graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase__ , lowerCAmelCase__)
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase__ , lowerCAmelCase__)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Any = list(model.graph.initializer)
lowerCamelCase_ : List[Any] = list(model_without_ext.graph.initializer)
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowerCamelCase_ : str = inits[i].name
lowerCamelCase_ : Optional[Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i])
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCAmelCase__ , lowerCAmelCase__)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = os.path.dirname(lowerCAmelCase__)
lowerCamelCase_ : Dict = os.path.basename(lowerCAmelCase__)
lowerCamelCase_ : Optional[Any] = onnx.load(os.path.join(lowerCAmelCase__ , lowerCAmelCase__))
lowerCamelCase_ : Dict = list(model.graph.initializer)
lowerCamelCase_ : Dict = set()
lowerCamelCase_ : Dict = {}
lowerCamelCase_ : str = []
lowerCamelCase_ : List[Any] = 0
for i in range(len(lowerCAmelCase__)):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCAmelCase__)):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j]):
dup_set.add(lowerCAmelCase__)
dup_set.add(lowerCAmelCase__)
lowerCamelCase_ : Optional[int] = inits[j].data_type
lowerCamelCase_ : Union[str, Any] = numpy.prod(inits[j].dims)
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , lowerCAmelCase__)
total_reduced_size += mem_size
lowerCamelCase_ : Optional[int] = inits[i].name
lowerCamelCase_ : int = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCAmelCase__)
else:
lowerCamelCase_ : Optional[int] = [name_j]
ind_to_replace.append((j, i))
print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB")
lowerCamelCase_ : List[str] = sorted(lowerCAmelCase__)
_remove_dup_initializers_from_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
lowerCamelCase_ : Tuple = "optimized_" + model_file_name
lowerCamelCase_ : Any = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
onnx.save(lowerCAmelCase__ , lowerCAmelCase__)
return new_model
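# The element-size rule used above, restated against onnx's TensorProto
# constants instead of bare ints (FLOAT=1, INT32=6, INT64=7, DOUBLE=11):
_BYTES_PER_ELEMENT = {
    onnx.TensorProto.FLOAT: 4,
    onnx.TensorProto.INT32: 4,
    onnx.TensorProto.INT64: 8,
    onnx.TensorProto.DOUBLE: 8,
}

def _init_mem_size(dims, dtype):
    # bytes occupied by one initializer tensor of the given shape and dtype
    return numpy.prod(dims) * _BYTES_PER_ELEMENT.get(dtype, 0)

assert _init_mem_size([1024, 1024], onnx.TensorProto.FLOAT) == 4 * 1024 * 1024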
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''ClapFeatureExtractor'''
__UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , a_ , a_ ):
super().__init__(a_ , a_ )
def __call__( self , a_=None , a_=None , a_=None , **a_ ):
lowerCamelCase_ : Any = kwargs.pop("sampling_rate" , a_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
lowerCamelCase_ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if audios is not None:
lowerCamelCase_ : List[str] = self.feature_extractor(
a_ , sampling_rate=a_ , return_tensors=a_ , **a_ )
if text is not None and audios is not None:
lowerCamelCase_ : List[str] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.tokenizer.model_input_names
lowerCamelCase_ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
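# The merge logic of __call__ above, in isolation: tokenize text, extract
# audio features, and when both are present attach the audio tensor onto the
# text encoding under "input_features". Plain dicts below stand in for the
# real BatchEncoding/BatchFeature objects.
def _combine(text_encoding, audio_features):
    if text_encoding is None and audio_features is None:
        raise ValueError("You have to specify either text or audios. Both cannot be none.")
    if text_encoding is not None and audio_features is not None:
        text_encoding["input_features"] = audio_features["input_features"]
        return text_encoding
    return text_encoding if text_encoding is not None else audio_features

assert _combine({"input_ids": [[0, 1]]}, {"input_features": [[0.0]]}) == {
    "input_ids": [[0, 1]],
    "input_features": [[0.0]],
}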
| 73 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
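# The lazy-import pattern above, in miniature: attribute access triggers the
# real import only on first use, then caches the result on the module object.
import importlib
import types

class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._name_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

_m = _MiniLazyModule("demo", {"json": ["dumps"]})
assert _m.dumps({"a": 1}) == '{"a": 1}'  # json is imported lazily here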
| 707 |
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_faster()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_fastest()" , setup=lowerCAmelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
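    # Cross-check (sketch): the three implementations above should agree on
    # pangrams and non-pangrams alike.
    for sentence in ("The quick brown fox jumps over the lazy dog", "hello world"):
        assert is_pangram(sentence) == is_pangram_faster(sentence) == is_pangram_fastest(sentence)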
| 73 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__magic_name__ = 'src/transformers'
__magic_name__ = 'docs/source/en'
__magic_name__ = '.'
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
with open(_A , "r" , encoding="utf-8" , newline="\n") as f:
lowerCamelCase_ : Optional[Any] = f.readlines()
# Find the start prompt.
lowerCamelCase_ : List[str] = 0
while not lines[start_index].startswith(_A):
start_index += 1
start_index += 1
lowerCamelCase_ : str = start_index
while not lines[end_index].startswith(_A):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__magic_name__ = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
__magic_name__ = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
__magic_name__ = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__magic_name__ = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
__magic_name__ = direct_transformers_import(TRANSFORMERS_PATH)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : List[Any] = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , _A)
return [m.group(0) for m in matches]
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Any = 2 if text == "✅" or text == "❌" else len(_A)
lowerCamelCase_ : Any = (width - text_length) // 2
lowerCamelCase_ : Tuple = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCamelCase_ : Optional[Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowerCamelCase_ : Optional[Any] = {name: config.replace("Config" , "") for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowerCamelCase_ : List[Any] = collections.defaultdict(_A)
lowerCamelCase_ : Union[str, Any] = collections.defaultdict(_A)
lowerCamelCase_ : Optional[int] = collections.defaultdict(_A)
lowerCamelCase_ : Optional[int] = collections.defaultdict(_A)
lowerCamelCase_ : Dict = collections.defaultdict(_A)
# Let's lookup through all transformers object (once).
for attr_name in dir(_A):
lowerCamelCase_ : List[str] = None
if attr_name.endswith("Tokenizer"):
lowerCamelCase_ : str = slow_tokenizers
lowerCamelCase_ : Optional[Any] = attr_name[:-9]
elif attr_name.endswith("TokenizerFast"):
lowerCamelCase_ : str = fast_tokenizers
lowerCamelCase_ : Optional[Any] = attr_name[:-13]
elif _re_tf_models.match(_A) is not None:
lowerCamelCase_ : Tuple = tf_models
lowerCamelCase_ : str = _re_tf_models.match(_A).groups()[0]
elif _re_flax_models.match(_A) is not None:
lowerCamelCase_ : str = flax_models
lowerCamelCase_ : Optional[int] = _re_flax_models.match(_A).groups()[0]
elif _re_pt_models.match(_A) is not None:
lowerCamelCase_ : int = pt_models
lowerCamelCase_ : Union[str, Any] = _re_pt_models.match(_A).groups()[0]
if lookup_dict is not None:
while len(_A) > 0:
if attr_name in model_name_to_prefix.values():
lowerCamelCase_ : Optional[Any] = True
break
# Try again after removing the last word in the name
lowerCamelCase_ : Optional[Any] = "".join(camel_case_split(_A)[:-1])
# Let's build that table!
lowerCamelCase_ : int = list(model_name_to_config.keys())
model_names.sort(key=str.lower)
lowerCamelCase_ : Dict = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowerCamelCase_ : List[Any] = [len(_A) + 2 for c in columns]
lowerCamelCase_ : Any = max([len(_A) for name in model_names]) + 2
# Build the table per se
lowerCamelCase_ : str = "|" + "|".join([_center_text(_A , _A) for c, w in zip(_A , _A)]) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
lowerCamelCase_ : Tuple = {True: "✅", False: "❌"}
for name in model_names:
lowerCamelCase_ : List[str] = model_name_to_prefix[name]
lowerCamelCase_ : str = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_A , _A) for l, w in zip(_A , _A)]) + "|\n"
return table
def __magic_name__ ( lowerCAmelCase_=False):
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = _find_text_in_file(
filename=os.path.join(_A , "index.md") , start_prompt="<!--This table is updated automatically from the auto modules" , end_prompt="<!-- End table-->" , )
lowerCamelCase_ : List[Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_A , "index.md") , "w" , encoding="utf-8" , newline="\n") as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 708 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def energy_conversion(from_type , to_type , value):
    '''
    Convert `value` between two of the energy units above (joules are the base unit).
    >>> energy_conversion("joule", "kilojoule", 1000)
    1.0
    >>> energy_conversion("kilowatthour", "joule", 1.0)
    3600000.0
    '''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
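# Minimal usage sketch of the converter above (values follow directly from the table):
# energy_conversion("calorie_nutr", "joule", 1)   -> 4186.8
# energy_conversion("joule", "wattsecond", 5.0)   -> 5.0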
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 5_1_2,
'YituTech/conv-bert-medium-small': 5_1_2,
'YituTech/conv-bert-small': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
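# Hypothetical usage sketch of the fast tokenizer above (the checkpoint name is taken
# from the pretrained vocab map; the ids in the comment are illustrative):
# tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]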
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def _decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
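# Sketch of the resulting special-token layout for a sequence pair (ids are illustrative);
# unlike BERT-style tokenizers, XLNet appends its special tokens at the end:
# build_inputs_with_special_tokens([10, 11], [20])       -> [10, 11, sep_id, 20, sep_id, cls_id]
# create_token_type_ids_from_sequences([10, 11], [20])   -> [0, 0, 0, 1, 1, 2]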
| 73 | 0 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """simple docstring"""
    model_type: str = field(
        default=None, metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None, metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
    max_seq_length: int = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''}, )
    max_query_length: int = field(
        default=64, metadata={
            '''help''': (
                '''The maximum number of tokens for the question. Questions longer than this will '''
                '''be truncated to this length.'''
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            '''help''': (
                '''The maximum length of an answer that can be generated. This is needed because the start '''
                '''and end predictions are not conditioned on one another.'''
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    version_2_with_negative: bool = field(
        default=False, metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    n_best_size: int = field(
        default=20, metadata={'''help''': '''The total number of n-best predictions to generate.'''} )
    lang_id: int = field(
        default=0, metadata={
            '''help''': (
                '''language id of input for language-specific xlm models (see'''
                ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
            )
        }, )
    threads: int = field(default=1, metadata={'''help''': '''multiple threads for converting example to features'''} )
class Split( Enum ):
    """simple docstring"""
    train = '''train'''
    dev = '''dev'''
class SquadDataset( Dataset ):
    """simple docstring"""
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset" , None )
                self.examples = self.old_features.get("examples" , None )
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
return len(self.features )
    def __getitem__( self , i ):
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
return inputs
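# A minimal standalone sketch (hypothetical helper, not part of the upstream module) of the
# cache-under-FileLock pattern used in `__init__` above: one process builds, the rest reuse.
def _load_or_build_cache(cache_file, build_fn, overwrite=False):
    with FileLock(cache_file + ".lock"):  # serializes access across distributed workers
        if os.path.exists(cache_file) and not overwrite:
            return torch.load(cache_file)
        data = build_fn()
        torch.save(data, cache_file)
        return data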
| 710 |
def __magic_name__ (min_val = 10 , max_val = 1000 , option = True):
    '''Return `min_val` if `option` is truthy, else `max_val`, after validating the bounds.'''
    assert (
        isinstance(min_val , int)
        and isinstance(max_val , int)
        and isinstance(option , bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1 , number_2):
    '''Return the integer midpoint of two numbers.'''
    return int((number_1 + number_2) / 2)
def guess_the_number(lower , higher , to_guess):
    '''Bisect between `lower` and `higher` until `to_guess` is found, printing each probe.'''
    assert (
        isinstance(lower , int) and isinstance(higher , int) and isinstance(to_guess , int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower < higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")
    def answer(number) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""")
    print(F"""details : {last_numbers!s}""")
def main():
    '''Collect the bounds and target from user input, then start the search.'''
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower , higher , guess)
if __name__ == "__main__":
main()
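# Worked example of the bisection above with lower=0, higher=1000, to_guess=17:
# probes 500, 250, 125, 62, 31 all answer "high" (lowering the upper bound),
# 15 answers "low" (raising the lower bound), then 23, 19, and finally 17 ("same").
# guess_the_number(0, 1000, 17) therefore prints:
#   guess the number : 17
#   details : [500, 250, 125, 62, 31, 15, 23, 19, 17]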
| 73 | 0 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was made to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''cvt'''
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
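# Hypothetical usage sketch of the config above; every per-stage list (patch_sizes,
# embed_dim, num_heads, depth, ...) carries one entry per stage, three stages by default:
# config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10])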
| 73 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size):
    '''Convert an original mLUKE checkpoint into a Hugging Face LukeForMaskedLM checkpoint plus tokenizer files.'''
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True , **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>" , lstrip=False , rstrip=False)
    entity_token_2 = AddedToken("<ent2>" , lstrip=False , rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(F"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path , "tokenizer_config.json") , "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = """MLukeTokenizer"""
    with open(os.path.join(pytorch_dump_folder_path , "tokenizer_config.json") , "w") as f:
        json.dump(tokenizer_config , f)
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names["entity_vocab_file"]) , "w") as f:
        json.dump(entity_vocab , f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["""embeddings.word_embeddings.weight"""]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    entity_mask_emb = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["""entity_predictions.bias"""]
    entity_mask_bias = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[F"""luke.{key}"""] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys , unexpected_keys = model.load_state_dict(state_dict_for_hugging_face , strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F"""Unexpected missing_keys: {missing_keys}""")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="entity_classification")
    text = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            F""" {expected_shape}""")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = """Tokyo is the capital of <mask>."""
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["""input_ids"""][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    '''Read the original JSONL entity vocab and remap entity names to "<language>:<entity>" keys.'''
    SPECIAL_TOKENS = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["""id"""]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F"""{language}:{entity_name}"""] = entity_id
    return new_mapping
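# Illustrative JSONL line in the format consumed above (the values are hypothetical):
# {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# -> contributes {"en:Japan": 3, "ja:日本": 3} to the returned mapping.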
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
def encrypt(input_string , key):
    '''Rail-fence encrypt: scatter the characters of `input_string` over `key` rows in a zigzag, then read row by row.'''
    temp_grid = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string , key):
    '''Rail-fence decrypt: rebuild the zigzag template, fill it row by row, then read it off in zigzag order.'''
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(row)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string):
    '''Try decrypt() with every possible key and return a {key: plaintext candidate} mapping.'''
    results = {}
    for key_guess in range(1 , len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess)
    return results
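# Worked example with key=3: the characters of "HELLOWORLD" fall on rows
# 0,1,2,1,0,1,2,1,0,1, so the rows read "HOL", "ELWRD", "LO" and
# encrypt("HELLOWORLD", 3) -> "HOLELWRDLO"; decrypt("HOLELWRDLO", 3) restores the input.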
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor( ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = '''EncodecFeatureExtractor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            inputs = self.tokenizer(text , **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        audio_values = kwargs.pop("audio" , None )
        padding_mask = kwargs.pop("padding_mask" , None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    def _decode_audio( self , audio_values , padding_mask = None ):
        audio_values = to_numpy(audio_values )
        bsz , channels , seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values )
        padding_mask = to_numpy(padding_mask )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , "constant" , constant_values=padding_value )
        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1 )
        return audio_values
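# Hypothetical usage sketch of the processor above (the prompt, waveform, and
# sampling rate are illustrative, not taken from the original module):
# inputs = processor(text=["80s pop track"], audio=waveform, sampling_rate=32000)
# audios = processor.batch_decode(generated_values, padding_mask=inputs["padding_mask"])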
| 73 | 0 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = '''allenai'''
def rewrite_dict_keys(d):
    '''Convert fairseq BPE vocab keys ("word@@" continuations) to huggingface style ("word</w>" endings).'''
    d2 = dict((re.sub(R"@@$" , "" , k), v) if k.endswith("@@") else (re.sub(R"$" , "</w>" , k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path , pytorch_dump_folder_path):
    '''Convert a fairseq WMT checkpoint (weights, dicts, bpecodes) into a transformers FSMT model dump.'''
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path , exist_ok=True)
    print(F"""Writing results to {pytorch_dump_folder_path}""")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(F"""using checkpoint {checkpoint_file}""")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path , checkpoint_file , data_name_or_path , archive_map=models , **kwargs)
    args = vars(chkpt["args"]["model"])
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path , F"""dict.{src_lang}.txt""")
    tgt_dict_file = os.path.join(fsmt_folder_path , F"""dict.{tgt_lang}.txt""")
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path , "vocab-src.json")
    print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""")
    with open(src_vocab_file , "w" , encoding="utf-8") as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path , "vocab-tgt.json")
    print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""")
    with open(tgt_vocab_file , "w" , encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab , ensure_ascii=False , indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path , fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file , encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(R" \d+$" , "" , merges , 0 , re.M)  # remove frequency number
    print(F"""Generating {merges_file}""")
    with open(merges_file , "w" , encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path , "config.json")
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args['bpe']}"""
    assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support tokenizer={args['tokenizer']}"""
    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(F"""Generating {fsmt_model_config_file}""")
    with open(fsmt_model_config_file , "w" , encoding="utf-8") as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(F"""Generating {fsmt_tokenizer_config_file}""")
    with open(fsmt_tokenizer_config_file , "w" , encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict , strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME)
    print(F"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict , pytorch_weights_dump_path)
    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(F"""cd {data_root}""")
    print(F"""transformers-cli upload {model_dir}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
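# Example invocation (hypothetical paths; the checkpoint dir must also hold the
# dict.*.txt files and bpecodes, as noted in the --fsmt_checkpoint_path help text):
# python convert_fsmt_checkpoint.py \
#     --fsmt_checkpoint_path data/wmt19-ru-en/model4.pt \
#     --pytorch_dump_folder_path data/wmt19-ru-en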
| 714 |
def decimal_isolate(number , digit_amount):
    '''Isolate the fractional part of `number`, rounded to `digit_amount` digits
    (all digits when `digit_amount` is 0), e.g. decimal_isolate(35.345, 1) -> 0.3.'''
    if digit_amount > 0:
        return round(number - int(number) , digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 73 | 0 |
''' T5 model configuration'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config( PretrainedConfig ):
    """simple docstring"""
    model_type = '''t5'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=3_2128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
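# Usage sketch (illustrative, not part of the original module): the ONNX config
# reports the dynamic axes used at export time, e.g.
#
#     config = T5Config()
#     onnx_config = T5OnnxConfig(config)
#     onnx_config.inputs["input_ids"]  # -> {0: "batch", 1: "encoder_sequence"}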
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlm_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
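# Usage sketch (illustrative, not part of the original module): after the
# _LazyModule swap above, submodule imports are deferred until first attribute
# access, e.g.
#
#     from transformers.models.llama import LlamaConfig  # triggers the real import
#     config = LlamaConfig()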
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73 | 0 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """
    Finds where `function` becomes 0 in [a, b] using the bisection method.
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
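    # Additional sketch (not in the original): the same routine recovers sqrt(2)
    # as the positive root of x**2 - 2 on [0, 2].
    print(bisection(lambda x: x**2 - 2, 0, 2))  # ~1.4142135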
| 717 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order,
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
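# Usage sketch (illustrative, not part of the original module): shuffling visits
# the Spark partitions in a new order without touching rows inside each partition:
#
#     rng = np.random.default_rng(42)
#     shuffled = SparkExamplesIterable(df).shuffle_data_sources(rng)
#     shuffled.partition_order  # e.g. [3, 0, 2, 1]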
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir

        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
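# Usage sketch (illustrative; mirrors the public entry point backed by this
# builder):
#
#     df = spark.createDataFrame(rows, schema)
#     ds = datasets.Dataset.from_spark(df)  # constructs the Spark builder internally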
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance,
):
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
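    # Extra usage sketch (not in the original file): with the graphs above, the
    # shortest E -> F distance is 3 (E -> G -> F).
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3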
| 73 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
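# Worked sketch (illustrative, not part of the metric): with 2 samples per task
# and exactly 1 passing, pass@1 is 0.5 and pass@2 is 1.0 for each task:
#
#     estimate_pass_at_k(np.array([2, 2]), np.array([1, 1]), 1)  # -> array([0.5, 0.5])
#     estimate_pass_at_k(np.array([2, 2]), np.array([1, 1]), 2)  # -> array([1.0, 1.0])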
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 73 | 0 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """
    ELU activation: x for x > 0, alpha * (exp(x) - 1) otherwise.
    """
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
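    # Extra sketch (not in the original): negative inputs saturate toward -alpha,
    # zero maps to zero, positives pass through.
    print(exponential_linear_unit(np.array([-2.0, 0.0, 2.0]), alpha=1.0))  # ~[-0.865, 0.0, 2.0]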
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCamelCase )}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
__UpperCAmelCase : str = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
    def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(default=__lowerCamelCase, metadata={'''help''': '''The input training data file (a text file).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__UpperCAmelCase : Optional[int] = field(
default=5, metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
__UpperCAmelCase : float = field(
default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
}, )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
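# Illustrative sketch (not in the original script): each line of the ref file is
# a JSON list of character positions that start a sub-word inside a Chinese word,
# e.g.
#
#     [2, 3]
#     [1, 4, 5]
#
# These lists become the `chinese_ref` column consumed by DataCollatorForWholeWordMask.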
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 73 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = '''wav2vec2'''
def __init__( self , a_=32 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.1 , a_=0.1 , a_=0.02 , a_=1E-5 , a_="group" , a_="gelu" , a_=(512, 512, 512, 512, 512, 512, 512) , a_=(5, 2, 2, 2, 2, 2, 2) , a_=(10, 3, 3, 3, 3, 2, 2) , a_=False , a_=128 , a_=16 , a_=False , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_=320 , a_=2 , a_=0.1 , a_=100 , a_=256 , a_=256 , a_=0.1 , a_="sum" , a_=False , a_=False , a_=256 , a_=(512, 512, 512, 512, 1500) , a_=(5, 3, 3, 1, 1) , a_=(1, 2, 3, 1, 1) , a_=512 , a_=0 , a_=1 , a_=2 , a_=False , a_=3 , a_=2 , a_=3 , a_=None , a_=None , **a_ , ):
super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
lowerCamelCase_ : Union[str, Any] = hidden_size
lowerCamelCase_ : Optional[int] = feat_extract_norm
lowerCamelCase_ : Optional[int] = feat_extract_activation
lowerCamelCase_ : Dict = list(UpperCAmelCase__ )
lowerCamelCase_ : str = list(UpperCAmelCase__ )
lowerCamelCase_ : str = list(UpperCAmelCase__ )
lowerCamelCase_ : List[Any] = conv_bias
lowerCamelCase_ : List[Any] = num_conv_pos_embeddings
lowerCamelCase_ : Dict = num_conv_pos_embedding_groups
lowerCamelCase_ : Optional[int] = len(self.conv_dim )
lowerCamelCase_ : Any = num_hidden_layers
lowerCamelCase_ : int = intermediate_size
lowerCamelCase_ : List[str] = hidden_act
lowerCamelCase_ : Dict = num_attention_heads
lowerCamelCase_ : Tuple = hidden_dropout
lowerCamelCase_ : Optional[int] = attention_dropout
lowerCamelCase_ : Tuple = activation_dropout
lowerCamelCase_ : str = feat_proj_dropout
lowerCamelCase_ : int = final_dropout
lowerCamelCase_ : Optional[Any] = layerdrop
lowerCamelCase_ : Union[str, Any] = layer_norm_eps
lowerCamelCase_ : List[str] = initializer_range
lowerCamelCase_ : str = vocab_size
lowerCamelCase_ : Optional[Any] = do_stable_layer_norm
lowerCamelCase_ : List[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ : List[Any] = apply_spec_augment
lowerCamelCase_ : Optional[Any] = mask_time_prob
lowerCamelCase_ : Union[str, Any] = mask_time_length
lowerCamelCase_ : Optional[int] = mask_time_min_masks
lowerCamelCase_ : Union[str, Any] = mask_feature_prob
lowerCamelCase_ : Any = mask_feature_length
lowerCamelCase_ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase_ : Optional[int] = num_codevectors_per_group
lowerCamelCase_ : int = num_codevector_groups
lowerCamelCase_ : Optional[Any] = contrastive_logits_temperature
lowerCamelCase_ : str = feat_quantizer_dropout
lowerCamelCase_ : List[str] = num_negatives
lowerCamelCase_ : int = codevector_dim
lowerCamelCase_ : Any = proj_codevector_dim
lowerCamelCase_ : List[Any] = diversity_loss_weight
# ctc loss
lowerCamelCase_ : Tuple = ctc_loss_reduction
lowerCamelCase_ : Optional[Any] = ctc_zero_infinity
# adapter
lowerCamelCase_ : Dict = add_adapter
lowerCamelCase_ : str = adapter_kernel_size
lowerCamelCase_ : Optional[Any] = adapter_stride
lowerCamelCase_ : Union[str, Any] = num_adapter_layers
lowerCamelCase_ : Union[str, Any] = output_hidden_size or hidden_size
lowerCamelCase_ : Dict = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCamelCase_ : Union[str, Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCamelCase_ : List[str] = list(UpperCAmelCase__ )
lowerCamelCase_ : List[str] = list(UpperCAmelCase__ )
lowerCamelCase_ : Dict = list(UpperCAmelCase__ )
lowerCamelCase_ : Optional[int] = xvector_output_dim
@property
def _UpperCamelCase ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
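# Minimal sketch of what the property above computes: the product of the
# convolutional strides is the feature extractor's downsampling factor. With
# strides matching the (5, 2, 2, 2, 2, 2, 2) default in the signature above,
# that is 5 * 2**6 = 320, so one second of 16 kHz audio yields ~50 frames.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the default conv_stride above
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320
print(16_000 // ratio)  # -> 50 encoder frames per second of audio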
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
# setable values
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _UpperCamelCase ( cls ):
return cls()
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : KarrasVeSchedulerState
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
@property
def _UpperCamelCase ( self ):
return True
@register_to_config
def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
pass
def _UpperCamelCase ( self ):
return KarrasVeSchedulerState.create()
def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
lowerCamelCase_ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
            num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.float32 ) , timesteps=a_ , )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
lowerCamelCase_ : List[str] = sigma + gamma * sigma
lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
raise NotImplementedError()
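# Numeric sketch of the "churn" step implemented by this scheduler: sigma is
# bumped to sigma_hat = sigma * (1 + gamma) and fresh noise is added so the
# sample's variance matches sigma_hat**2. Plain NumPy stand-in; the parameter
# roles (s_churn=80, s_noise=1.007, 50 steps) are inferred from the defaults
# in the obfuscated __init__ signature above and are an assumption.
import numpy as np

sigma, s_churn, s_noise, num_inference_steps = 10.0, 80.0, 1.007, 50
gamma = min(s_churn / num_inference_steps, 2**0.5 - 1)  # churn capped at sqrt(2) - 1
sigma_hat = sigma + gamma * sigma
rng = np.random.default_rng(0)
sample = rng.normal(size=(4,)) * sigma        # a sample at noise level sigma
eps = s_noise * rng.normal(size=(4,))
sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5) * eps
print(gamma, sigma_hat)  # ~0.4142, ~14.142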
| 73 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links( workflow_run_id , token=None):
    '''Extract job names and their links from a GitHub Actions workflow run.'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url , headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"""&page={i + 2}""" , headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")
        return {}
def get_artifacts_links( workflow_run_id , token=None):
    '''Get all artifact links from a GitHub Actions workflow run.'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    result = requests.get(url , headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"""&page={i + 2}""" , headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")
        return {}
def download_artifact( artifact_name , artifact_url , output_dir , token):
    '''Download a GitHub Actions artifact (the URL redirects to a signed download location).'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True)
    file_path = os.path.join(output_dir , F"""{artifact_name}.zip""")
    with open(file_path , "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact( artifact_zip_path , job_links=None):
    '''Extract errors from a downloaded artifact (in .zip format).'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            F"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` """
            F"""and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            " problem.")
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests)]
    return result
def get_all_errors( artifact_dir , job_links=None):
    '''Extract errors from all artifact zip files in a directory.'''
    errors = []
    paths = [os.path.join(artifact_dir , p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links))
    return errors
def reduce_by_error( logs , error_filter=None):
    '''Count each error across all logs, most common first.'''
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
    return r
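# Illustrative shape of the mapping returned by reduce_by_error above
# (keys and values are made up):
# {
#     "ImportError: cannot import name 'X'": {
#         "count": 3,
#         "failed_tests": [
#             ("tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward",
#              "src/transformers/models/bert/modeling_bert.py:42"),
#             ...,
#         ],
#     },
#     ...
# }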
def get_model( test):
    '''Get the model name from a test path such as tests/models/bert/test_modeling_bert.py::...'''
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model( logs , error_filter=None):
    '''Count each error per model, most affected models first.'''
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
    return r
def make_github_table( reduced_by_error):
    '''Render the per-error counts as a GitHub-flavored markdown table.'''
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = F"""| {count} | {error[:100]} | |"""
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model( reduced_by_model):
    '''Render the per-model error counts as a GitHub-flavored markdown table.'''
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error , _count = list(reduced_by_model[model]["errors"].items())[0]
        line = F"""| {model} | {count} | {error[:60]} | {_count} |"""
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(''' / ''')
                k = k[index + len(''' / ''') :]
            job_links[k] = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    sa = make_github_table(reduced_by_error)
    sb = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(sb)
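# How this script is typically invoked (hypothetical run id and file name;
# the token needs actions:read permission on the repository):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_reports \
#       --token "$GITHUB_TOKEN"
#
# It downloads every artifact of the run and writes job_links.json,
# artifacts.json, errors.json plus the two markdown tables
# (reduced_by_error.txt, reduced_by_model.txt) into --output_dir.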
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__UpperCAmelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : List[str] = frozenset([] )
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
        lowerCamelCase_ : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
lowerCamelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCamelCase_ : Dict = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Tuple = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Any = Image.fromarray(np.uint8(image ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : int = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Optional[int] = Image.fromarray(np.uint8(image ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Union[str, Any] = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self ):
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : int = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a_ , a_ , a_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
lowerCamelCase_ : int = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
self.assertLess(a_ , 1E-4 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
lowerCamelCase_ : int = pipe.generate_mask(**a_ )
lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase_ : List[str] = np.array([0] * 9 )
lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
lowerCamelCase_ : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Dict = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
def _UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : str = pipe.invert(**a_ ).images
lowerCamelCase_ : int = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Union[str, Any] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _UpperCamelCase ( cls ):
lowerCamelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
lowerCamelCase_ : List[Any] = raw_image
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = torch.manual_seed(0 )
lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : str = "a bowl of fruit"
lowerCamelCase_ : Optional[int] = "a bowl of pears"
lowerCamelCase_ : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
lowerCamelCase_ : List[str] = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = "a bowl of fruit"
lowerCamelCase_ : Dict = "a bowl of pears"
lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
lowerCamelCase_ : Any = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , *a_ , **a_ ):
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
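# Migration sketch: the deprecated class above only forwards to
# ImageGPTImageProcessor, so new code should construct the processor directly.
# (The checkpoint name below is the public ImageGPT small checkpoint.)
from transformers import ImageGPTImageProcessor

processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")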
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = ["a", "b", "c"]
# Defaults to last layer if both are None
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _UpperCamelCase ( self ):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
        backbone.out_indices = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
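# Minimal illustration of the alignment helper exercised above: with stage
# names ["a", "b", "c"] and negative out_indices, stages are selected from
# the end, mirroring the last case in the first test method.
from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

features, indices = get_aligned_output_features_output_indices(None, [-3, -1], ["a", "b", "c"])
print(features, indices)  # ['a', 'c'] [-3, -1]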
| 73 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''ClapFeatureExtractor'''
__UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , a_ , a_ ):
super().__init__(_lowercase , _lowercase )
def __call__( self , a_=None , a_=None , a_=None , **a_ ):
lowerCamelCase_ : Union[str, Any] = kwargs.pop("sampling_rate" , _lowercase )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
lowerCamelCase_ : List[Any] = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if audios is not None:
lowerCamelCase_ : List[str] = self.feature_extractor(
_lowercase , sampling_rate=_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer.model_input_names
lowerCamelCase_ : Union[str, Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
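# Hedged usage sketch for the processor above, written against the public
# transformers names (the aliases in this sample are obfuscated). It tokenizes
# a caption and extracts audio features in one call; "laion/clap-htsat-unfused"
# is a known public CLAP checkpoint.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
inputs = processor(text=["the sound of rain"], audios=audio, sampling_rate=48_000, return_tensors="pt")
# Includes input_ids / attention_mask from the tokenizer and input_features
# from the feature extractor.
print(sorted(inputs.keys()))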
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
__UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
__UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCAmelCase : int = '''default_config.yaml'''
__UpperCAmelCase : Tuple = config_folder / config_file
__UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
__UpperCAmelCase : int = Path('''tests/test_configs''' )
@classmethod
def _UpperCamelCase ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _UpperCamelCase ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _UpperCamelCase ( self ):
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=a_ ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''test-tpu'''
__UpperCAmelCase : Tuple = '''us-central1-a'''
__UpperCAmelCase : Tuple = '''ls'''
__UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
__UpperCAmelCase : Dict = '''cd /usr/share'''
__UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
__UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
| 73 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self , a_ , a_=13 , a_=32 , a_=3 , a_=4 , a_=[10, 20, 30, 40] , a_=[2, 2, 3, 2] , a_=True , a_=True , a_=37 , a_="gelu" , a_=10 , a_=0.02 , a_=["stage2", "stage3", "stage4"] , a_=3 , a_=None , ):
lowerCamelCase_ : str = parent
lowerCamelCase_ : int = batch_size
lowerCamelCase_ : Any = image_size
lowerCamelCase_ : Optional[int] = num_channels
lowerCamelCase_ : Optional[int] = num_stages
lowerCamelCase_ : Any = hidden_sizes
lowerCamelCase_ : Any = depths
lowerCamelCase_ : Union[str, Any] = is_training
lowerCamelCase_ : Optional[int] = use_labels
lowerCamelCase_ : Optional[int] = intermediate_size
lowerCamelCase_ : Any = hidden_act
lowerCamelCase_ : Any = type_sequence_label_size
lowerCamelCase_ : str = initializer_range
lowerCamelCase_ : Union[str, Any] = out_features
lowerCamelCase_ : int = num_labels
lowerCamelCase_ : List[str] = scope
lowerCamelCase_ : Any = num_stages
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ : str = None
if self.use_labels:
lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : int = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _UpperCamelCase ( self ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowercase_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=lowercase_ , loss_ignore_index=255 , num_labels=self.num_labels , )
def _UpperCamelCase ( self , a_ , a_ , a_ ):
lowerCamelCase_ : Any = UperNetForSemanticSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCamelCase_ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__UpperCAmelCase : Optional[int] = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[Any] = False
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = UperNetModelTester(self )
lowerCamelCase_ : Dict = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def _UpperCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ):
return
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : int = model_class(lowercase_ )
lowerCamelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Union[str, Any] = [*signature.parameters.keys()]
lowerCamelCase_ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def _UpperCamelCase ( self ):
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def _UpperCamelCase ( self ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def _UpperCamelCase ( self ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def _UpperCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
def check_hidden_states_output(a_ , a_ , a_ ):
lowerCamelCase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Optional[int] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowerCamelCase_ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : Tuple = _config_zero_init(lowercase_ )
lowerCamelCase_ : Optional[Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCamelCase_ : str = model_class(config=lowercase_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="UperNet does not have tied weights" )
def _UpperCamelCase ( self ):
pass
@slow
def _UpperCamelCase ( self ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : List[str] = UperNetForSemanticSegmentation.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __magic_name__ ( ):
'''simple docstring'''
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg")
    image = Image.open(filepath).convert("RGB")
return image
@require_torch
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
lowerCamelCase_ : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(lowercase_ )
lowerCamelCase_ : List[Any] = prepare_img()
lowerCamelCase_ : Optional[int] = processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
with torch.no_grad():
lowerCamelCase_ : Dict = model(**lowercase_ )
lowerCamelCase_ : List[Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCamelCase_ : str = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase_ , atol=1E-4 ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
lowerCamelCase_ : Tuple = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(lowercase_ )
lowerCamelCase_ : Optional[Any] = prepare_img()
lowerCamelCase_ : Optional[int] = processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] = model(**lowercase_ )
lowerCamelCase_ : Any = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCamelCase_ : List[str] = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase_ , atol=1E-4 ) )
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , a_ , a_ , a_ ):
super().__init__()
self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ):
lowerCamelCase_ : Optional[Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
lowerCamelCase_ : Optional[int] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCamelCase_ : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : Optional[int] = {}
if accepts_eta:
lowerCamelCase_ : Optional[int] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCamelCase_ : Dict = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
lowerCamelCase_ : Optional[Any] = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : List[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# decode the image latents with the VAE
lowerCamelCase_ : str = self.vqvae.decode(a_ ).sample
lowerCamelCase_ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ : Optional[Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
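# Hedged usage sketch for the unconditional latent-diffusion pipeline above,
# via the public diffusers API. The CompVis CelebA-HQ LDM checkpoint is one
# known compatible unconditional model; adjust the name as needed.
import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("ldm_sample.png")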
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
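# What the lazy structure above buys you (hypothetical session, comments only):
# importing the package is cheap because only _import_structure is parsed;
# the torch-backed classes are materialized on first attribute access.
#
#   >>> from transformers.models.mgp_str import MgpstrConfig   # no torch import yet
#   >>> from transformers.models.mgp_str import MgpstrModel    # triggers the torch-gated branch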
| 704 |
import re
def __magic_name__ ( dna):
    '''Return the Watson-Crick complement of a DNA strand.'''
    if len(re.findall("[ATCG]" , dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG" , "TAGC"))
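# Quick sanity check for the complement function above (the obfuscated name
# `__magic_name__` is the function just defined; the alias is purely for
# readability):
dna_complement = __magic_name__
assert dna_complement("ATCGA") == "TAGCT"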
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__magic_name__ = logging.getLogger(__name__)
def save_model( model , dirpath):
    '''Save the (possibly pruned) model, replacing any stale files in dirpath.'''
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath , "config.json")) and os.path.isfile(
            os.path.join(dirpath , "config.json")):
            os.remove(os.path.join(dirpath , "config.json"))
        if os.path.exists(os.path.join(dirpath , "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath , "pytorch_model.bin")):
            os.remove(os.path.join(dirpath , "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy( p , unlogit=False):
    '''Compute the entropy of a probability distribution.'''
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor( tensor):
    '''Print a 2D tensor as one log line per layer.'''
    logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data))
        else:
            logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data))
def compute_heads_importance( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False):
    '''Compute head attention entropy and head importance scores.'''
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers , n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc="Iteration" , disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids ,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach() , True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowerCamelCase_ : Optional[int] = 2
lowerCamelCase_ : Dict = torch.pow(torch.pow(_lowerCamelCase , _lowerCamelCase).sum(-1) , 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1E-20
if not args.dont_normalize_global_importance:
lowerCamelCase_ : Optional[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies")
print_ad_tensor(_lowerCamelCase)
if compute_importance:
logger.info("Head importance scores")
print_ad_tensor(_lowerCamelCase)
logger.info("Head ranked by importance scores")
lowerCamelCase_ : Any = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device)
lowerCamelCase_ : int = torch.arange(
head_importance.numel() , device=args.device)
lowerCamelCase_ : int = head_ranks.view_as(_lowerCamelCase)
print_ad_tensor(_lowerCamelCase)
return attn_entropy, head_importance, total_loss
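# Hedged illustration of the importance signal used above: with an all-ones
# head mask that requires grad, |d(loss)/d(mask)| per head approximates how
# strongly the loss depends on that head. Standalone sketch, names illustrative.
_mask = torch.ones(2, 3, requires_grad=True)   # (n_layers, n_heads)
_loss = (_mask * torch.randn(2, 3)).sum()
_loss.backward()
_importance = _mask.grad.abs()                 # larger gradient magnitude => more important head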
def mask_heads( args , model , eval_dataloader):
    '''Iteratively mask the least important heads until the score drops below the threshold.'''
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False)
    original_score = 1 / loss  # instead of a downstream score, use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f" , original_score , original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)" , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir , "head_mask.npy") , head_mask.detach().cpu().numpy())
    return head_mask
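# Hedged sketch of one masking step above: sort heads by importance and zero
# the `k` least important entries of a flat mask (illustrative values).
_k = 2
_importance_flat = torch.rand(12)
_mask_flat = torch.ones(12)
_mask_flat[_importance_flat.sort()[1][:_k]] = 0.0  # sort()[1] gives ascending indices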
def prune_heads( args , model , eval_dataloader , head_mask):
    '''Actually prune the masked heads and compare score and speed before/after.'''
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info("Pruning: score with masking: %f score with pruning: %f" , score_masking , score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100)
    save_model(model , args.output_dir)
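# Hedged sketch of the {layer: [head indices]} mapping built above from a 0/1
# head mask (illustrative values).
_hm = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
_to_prune = {layer: (1 - _hm[layer].long()).nonzero().squeeze(-1).tolist() for layer in range(len(_hm))}
# -> {0: [1], 1: [0]}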
def main():
    '''Entry point: parse arguments, load the model and data, then compute, mask, and prune heads.'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=True , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the model predictions and checkpoints will be written." , )
    # Other parameters
    parser.add_argument(
        "--config_name" , default="" , type=str , help="Pretrained config name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--tokenizer_name" , default="" , type=str , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--cache_dir" , default=None , type=str , help="Where do you want to store the pre-trained models downloaded from s3" , )
    parser.add_argument(
        "--data_subset" , type=int , default=-1 , help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask heads until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold" , default=0.9 , type=float , help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value)." , )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=float , help="Fraction of heads to mask at each masking step.")
    parser.add_argument("--metric_name" , default="acc" , type=str , help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length" , default=128 , type=int , help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) , )
    parser.add_argument("--batch_size" , default=1 , type=int , help="Batch size.")
    parser.add_argument("--seed" , type=int , default=42)
    parser.add_argument("--local_rank" , type=int , default=-1 , help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip" , type=str , default="" , help="Can be used for distant debugging.")
    parser.add_argument("--server_port" , type=str , default="" , help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda" , args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1)))
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True)
    torch.save(args , os.path.join(args.output_dir , "run_args.bin"))
    logger.info("Training/evaluation parameters %s" , args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader)
        prune_heads(args , model , eval_dataloader , head_mask)
if __name__ == "__main__":
main()
| 705 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude , angle , radian_mode = False):
    '''Resolve a force given in polar form into (x, y) components.'''
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium( forces , location , eps = 10**-1):
    '''Check whether the net moment of the forces about their application points is (near) zero.'''
    moments: NDArray[float64] = cross(location , forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
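# Hedged sanity check: two equal and opposite collinear forces produce zero
# net moment, so the system is in equilibrium (illustrative values).
assert in_static_equilibrium(array([[1.0, 0.0], [-1.0, 0.0]]) , array([[0, 0], [0, 0]]))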
if __name__ == "__main__":
# Test to check if it works
__magic_name__ = array(
[
polar_force(7_18.4, 1_8_0 - 3_0),
polar_force(8_79.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__magic_name__ = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
__magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 73 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__magic_name__ = NewType('''DataClass''', Any)
__magic_name__ = NewType('''DataClassType''', Any)
def string_to_bool( v):
    '''Parse common truthy/falsy strings into a bool.'''
    if isinstance(v , bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""")
def make_choice_type_function( choices):
    '''Map the string representation of each choice back to the choice object.'''
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg , arg)
def HfArg( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
    '''Convenience wrapper around `dataclasses.field` that stores aliases and help text in the metadata.'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = 42
def __init__( self , a_ , **a_ ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCamelCase_ : Optional[Any] = ArgumentDefaultsHelpFormatter
super().__init__(**__UpperCamelCase )
if dataclasses.is_dataclass(__UpperCamelCase ):
lowerCamelCase_ : List[str] = [dataclass_types]
lowerCamelCase_ : int = list(__UpperCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__UpperCamelCase )
@staticmethod
def _UpperCamelCase ( a_ , a_ ):
lowerCamelCase_ : Optional[Any] = F"""--{field.name}"""
lowerCamelCase_ : Optional[int] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __UpperCamelCase ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
lowerCamelCase_ : int = kwargs.pop("aliases" , [] )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCamelCase_ : Optional[Any] = [aliases]
lowerCamelCase_ : List[Any] = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__UpperCamelCase , "UnionType" ) and isinstance(__UpperCamelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__UpperCamelCase ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F""" Problem encountered in field \'{field.name}\'.""" )
if type(__UpperCamelCase ) not in field.type.__args__:
# filter `str` in Union
lowerCamelCase_ : Union[str, Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCamelCase_ : str = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCamelCase_ : Union[str, Any] = (
field.type.__args__[0] if isinstance(__UpperCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCamelCase_ : str = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCamelCase_ : Dict = {}
if origin_type is Literal or (isinstance(field.type , __UpperCamelCase ) and issubclass(field.type , __UpperCamelCase )):
if origin_type is Literal:
lowerCamelCase_ : str = field.type.__args__
else:
lowerCamelCase_ : List[str] = [x.value for x in field.type]
lowerCamelCase_ : Union[str, Any] = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
lowerCamelCase_ : Dict = field.default
else:
lowerCamelCase_ : Tuple = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCamelCase_ : List[Any] = copy(__UpperCamelCase )
# Hack because type=bool in argparse does not behave as we want.
lowerCamelCase_ : Union[str, Any] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCamelCase_ : Tuple = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCamelCase_ : Any = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCamelCase_ : Dict = "?"
# This is the value that will get picked if we do --field_name (without value)
lowerCamelCase_ : Union[str, Any] = True
elif isclass(__UpperCamelCase ) and issubclass(__UpperCamelCase , __UpperCamelCase ):
lowerCamelCase_ : str = field.type.__args__[0]
lowerCamelCase_ : List[str] = "+"
if field.default_factory is not dataclasses.MISSING:
lowerCamelCase_ : str = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCamelCase_ : str = True
else:
lowerCamelCase_ : Any = field.type
if field.default is not dataclasses.MISSING:
lowerCamelCase_ : Any = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCamelCase_ : Optional[int] = field.default_factory()
else:
lowerCamelCase_ : List[Any] = True
parser.add_argument(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCamelCase_ : List[str] = False
parser.add_argument(F"""--no_{field.name}""" , action="store_false" , dest=field.name , **__UpperCamelCase )
def _UpperCamelCase ( self , a_ ):
if hasattr(__UpperCamelCase , "_argument_group_name" ):
lowerCamelCase_ : Optional[int] = self.add_argument_group(dtype._argument_group_name )
else:
lowerCamelCase_ : List[Any] = self
try:
lowerCamelCase_ : str = get_type_hints(__UpperCamelCase )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__UpperCamelCase ):
lowerCamelCase_ : str = ".".join(map(__UpperCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__UpperCamelCase ):
if not field.init:
continue
lowerCamelCase_ : List[str] = type_hints[field.name]
self._parse_dataclass_field(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self , a_=None , a_=False , a_=True , a_=None , a_=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCamelCase_ : Any = []
if args_filename:
args_files.append(Path(__UpperCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCamelCase_ : List[Any] = ArgumentParser()
args_file_parser.add_argument(__UpperCamelCase , type=__UpperCamelCase , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCamelCase_ ,lowerCamelCase_ : List[Any] = args_file_parser.parse_known_args(args=__UpperCamelCase )
lowerCamelCase_ : Any = vars(__UpperCamelCase ).get(args_file_flag.lstrip("-" ) , __UpperCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(__UpperCamelCase ) for p in cmd_args_file_paths] )
lowerCamelCase_ : int = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCamelCase_ : Optional[int] = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = self.parse_known_args(args=__UpperCamelCase )
lowerCamelCase_ : Any = []
for dtype in self.dataclass_types:
lowerCamelCase_ : Optional[int] = {f.name for f in dataclasses.fields(__UpperCamelCase ) if f.init}
lowerCamelCase_ : Union[str, Any] = {k: v for k, v in vars(__UpperCamelCase ).items() if k in keys}
for k in keys:
delattr(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ : Tuple = dtype(**__UpperCamelCase )
outputs.append(__UpperCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__UpperCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def _UpperCamelCase ( self , a_ , a_ = False ):
lowerCamelCase_ : str = set(args.keys() )
lowerCamelCase_ : Optional[int] = []
for dtype in self.dataclass_types:
lowerCamelCase_ : str = {f.name for f in dataclasses.fields(__UpperCamelCase ) if f.init}
lowerCamelCase_ : int = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCamelCase_ : List[str] = dtype(**__UpperCamelCase )
outputs.append(__UpperCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(__UpperCamelCase )}""" )
return tuple(__UpperCamelCase )
def _UpperCamelCase ( self , a_ , a_ = False ):
with open(Path(__UpperCamelCase ) , encoding="utf-8" ) as open_json_file:
lowerCamelCase_ : Dict = json.loads(open_json_file.read() )
lowerCamelCase_ : Union[str, Any] = self.parse_dict(__UpperCamelCase , allow_extra_keys=__UpperCamelCase )
return tuple(__UpperCamelCase )
def _UpperCamelCase ( self , a_ , a_ = False ):
lowerCamelCase_ : Any = self.parse_dict(yaml.safe_load(Path(__UpperCamelCase ).read_text() ) , allow_extra_keys=__UpperCamelCase )
return tuple(__UpperCamelCase )
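# Hedged usage sketch for the parser class above, assuming it mirrors
# transformers' HfArgumentParser; the dataclass, field names, and the
# `parse_args_into_dataclasses` method name are the upstream API, not the
# obfuscated names used in this file.
#
# @dataclasses.dataclass
# class ExampleArgs:
#     lr: float = dataclasses.field(default=1e-4, metadata={"help": "learning rate"})
#     do_eval: bool = False
#
# parser = HfArgumentParser([ExampleArgs])
# (example_args,) = parser.parse_args_into_dataclasses(["--lr", "3e-5", "--do_eval"])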
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''ClapFeatureExtractor'''
__UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop("sampling_rate" , None )
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def _UpperCamelCase ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def _UpperCamelCase ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def _UpperCamelCase ( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
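# Hedged usage sketch, assuming the processor above corresponds to
# transformers' ClapProcessor (model id and waveform are illustrative):
#
# from transformers import ClapProcessor
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=waveform,
#                    sampling_rate=48_000, return_tensors="pt")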
| 73 | 0 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class lowerCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self , a_ = True ):
lowerCamelCase_ : dict[T, list[T]] = {} # dictionary of lists
lowerCamelCase_ : List[str] = directed
    def add_edge( self , source_vertex , destination_vertex ):
        if not self.directed:  # For undirected graphs
            # if source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self ):
return pformat(self.adj_list )
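# Hedged usage sketch: `add_edge` (defined above) returns self, so calls chain.
# The class name is the one defined in this file.
_g = lowerCAmelCase__(directed=False)
_g.add_edge(1, 2).add_edge(2, 3)
# repr(_g) -> {1: [2], 2: [1, 3], 3: [2]}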
| 707 |
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_faster()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_fastest()" , setup=lowerCAmelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
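# Hedged sanity checks: the three implementations above should agree
# (input strings illustrative).
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not (is_pangram("hello world") or is_pangram_faster("hello world") or is_pangram_fastest("hello world"))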
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 73 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class lowerCAmelCase__ ( __A ):
"""simple docstring"""
__UpperCAmelCase : Tuple = """imagegpt"""
__UpperCAmelCase : List[Any] = ["""past_key_values"""]
__UpperCAmelCase : Any = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , a_=512 + 1 , a_=32 * 32 , a_=512 , a_=24 , a_=8 , a_=None , a_="quick_gelu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=1E-5 , a_=0.02 , a_=True , a_=True , a_=False , a_=False , a_=False , **a_ , ):
lowerCamelCase_ : Union[str, Any] = vocab_size
lowerCamelCase_ : str = n_positions
lowerCamelCase_ : Dict = n_embd
lowerCamelCase_ : Tuple = n_layer
lowerCamelCase_ : List[str] = n_head
lowerCamelCase_ : List[Any] = n_inner
lowerCamelCase_ : Union[str, Any] = activation_function
lowerCamelCase_ : List[str] = resid_pdrop
lowerCamelCase_ : Optional[Any] = embd_pdrop
lowerCamelCase_ : Any = attn_pdrop
lowerCamelCase_ : Tuple = layer_norm_epsilon
lowerCamelCase_ : Optional[Any] = initializer_range
lowerCamelCase_ : Union[str, Any] = scale_attn_weights
lowerCamelCase_ : Dict = use_cache
lowerCamelCase_ : Dict = scale_attn_by_inverse_layer_idx
lowerCamelCase_ : Union[str, Any] = reorder_and_upcast_attn
lowerCamelCase_ : List[Any] = tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ )
class lowerCAmelCase__ ( __A ):
"""simple docstring"""
@property
def _UpperCamelCase ( self ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def _UpperCamelCase ( self , a_ , a_ = 1 , a_ = -1 , a_ = False , a_ = None , a_ = 3 , a_ = 32 , a_ = 32 , ):
lowerCamelCase_ : Dict = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ : List[Any] = dict(preprocessor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return inputs
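# Hedged usage sketch, assuming the classes above correspond to transformers'
# ImageGPTConfig / ImageGPTOnnxConfig (values illustrative):
#
# from transformers import ImageGPTConfig
# config = ImageGPTConfig(n_embd=512, n_layer=24, n_head=8)
# assert config.hidden_size == config.n_embd  # via the attribute_map aliasing above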
| 708 |
__magic_name__ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def __magic_name__ ( from_type , to_type , value):
    '''Convert `value` between energy units via joules.'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
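# Hedged worked example: 1 kilowatt-hour is 3,600,000 joules, so the
# conversion above should round-trip exactly (function name as defined here).
assert __magic_name__("kilowatthour" , "joule" , 1) == 3_600_000.0
assert __magic_name__("joule" , "kilowatthour" , 3_600_000) == 1.0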
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_poolformer''': [
        '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''PoolFormerConfig''',
        '''PoolFormerOnnxConfig''',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_poolformer'''] = ['''PoolFormerFeatureExtractor''']
    _import_structure['''image_processing_poolformer'''] = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_poolformer'''] = [
        '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PoolFormerForImageClassification''',
        '''PoolFormerModel''',
        '''PoolFormerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
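# Hedged sketch of the lazy-import pattern used above: `_LazyModule` replaces
# the module object so heavy submodules are imported only on first attribute
# access, much like a module-level __getattr__ (PEP 562). Illustrative only:
#
# def __getattr__(name):
#     import importlib
#     for submodule, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module("." + submodule, __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")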
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__magic_name__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[int] = '''left'''
def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = do_lower_case
lowerCamelCase_ : str = remove_space
lowerCamelCase_ : Tuple = keep_accents
lowerCamelCase_ : Dict = vocab_file
lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def _UpperCamelCase ( self ):
return len(self.sp_model )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowerCamelCase_ : Any = self.__dict__.copy()
lowerCamelCase_ : Optional[int] = None
return state
def __setstate__( self , a_ ):
lowerCamelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : int = {}
lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self , a_ ):
if self.remove_space:
lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
else:
lowerCamelCase_ : str = inputs
lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
lowerCamelCase_ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
lowerCamelCase_ : List[str] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ : int = cur_pieces[1:]
else:
lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def _UpperCamelCase ( self , a_ ):
return self.sp_model.PieceToId(a_ )
def _UpperCamelCase ( self , a_ ):
return self.sp_model.IdToPiece(a_ )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : List[str] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
lowerCamelCase_ : Union[str, Any] = []
sub_texts.append(a_ )
else:
current_sub_text.append(a_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase_ : Union[str, Any] = "".join(a_ )
lowerCamelCase_ : Optional[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
return clean_text
else:
return text
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self , a_ , a_ = None ):
if not os.path.isdir(a_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Any = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
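# Hedged usage sketch, assuming the class above is transformers'
# XLNetTokenizer (model id illustrative):
#
# from transformers import XLNetTokenizer
# tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# ids = tok.encode("Hello world")  # XLNet appends <sep> and <cls> at the END (see above)
# tok.decode(ids)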
| 73 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path( test_file):
    '''Return the module path of a model test file.'''
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"""{test_file} instead.""")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""")
    components = components[:-1] + [test_fn.replace(".py" , "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module( test_file):
    '''Import and return the module corresponding to a test file.'''
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes( test_file):
    '''Collect all model tester classes defined in a test module.'''
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module , attr))
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__)
def get_test_classes( test_file):
    '''Collect all test classes (those with a non-empty `all_model_classes`) in a test module.'''
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module , attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , "all_model_classes" , [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes , key=lambda x: x.__name__)
def get_model_classes( test_file):
    '''Collect all model classes covered by the test classes in a test module.'''
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes , key=lambda x: x.__name__)
def get_model_tester_from_test_class( test_class):
    '''Return the model tester class used by a test class, if any.'''
    test = test_class()
    if hasattr(test , "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test , "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model( test_file , model_class):
    '''Return all test classes in a test module that cover a given model class.'''
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes , key=lambda x: x.__name__)
def get_tester_classes_for_model( test_file , model_class):
    '''Return all tester classes used by the test classes that cover a given model class.'''
    test_classes = get_test_classes_for_model(test_file , model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__)
def get_test_to_tester_mapping( test_file):
    '''Map each test class to its model tester class.'''
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping( test_file):
    '''Map each model class to the test classes that cover it.'''
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping( test_file):
    '''Map each model class to the tester classes that cover it.'''
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json( o):
    '''Serialize classes to their names so the mappings above can be printed as JSON.'''
    if isinstance(o , str):
        return o
    elif isinstance(o , type):
        return o.__name__
    elif isinstance(o , (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o , dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
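# Hedged demo of `to_json` above: classes serialize to their names and
# containers recurse (input illustrative).
assert to_json({int: [str, float]}) == {"int": ["str", "float"]}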
| 710 |
def __magic_name__ ( min_val = 10 , max_val = 1000 , option = True):
    '''Return min_val or max_val depending on `option`.'''
    assert (
        isinstance(min_val , int)
        and isinstance(max_val , int)
        and isinstance(option , bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be <= max_val)")
    return min_val if option else max_val
def get_avg( number_1 , number_2):
    '''Return the integer average of two numbers.'''
    return int((number_1 + number_2) / 2)
def guess_the_number( lower , higher , to_guess):
    '''Guess `to_guess` inside (lower, higher) by repeated bisection.'''
    assert (
        isinstance(lower , int) and isinstance(higher , int) and isinstance(to_guess , int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower must be less than higher")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")
    def answer(number) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""")
    print(F"""details : {last_numbers!s}""")
def main():
    '''Read the bounds and target from stdin and run the guessing game.'''
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower , higher , guess)
if __name__ == "__main__":
main()
| 73 | 0 |
__magic_name__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
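# A standalone sketch of the optional-dependency guard pattern used throughout
# this module. The helper names below are illustrative stand-ins, not the
# actual diffusers utilities:
class _DemoOptionalDependencyNotAvailable(Exception):
    """Raised when an optional backend is not installed."""


def _demo_torch_available():
    try:
        import torch  # noqa: F401

        return True
    except ImportError:
        return False


try:
    if not _demo_torch_available():
        raise _DemoOptionalDependencyNotAvailable()
except _DemoOptionalDependencyNotAvailable:
    _TORCH_EXPORTS = []  # the real library imports dummy placeholder objects here instead
else:
    _TORCH_EXPORTS = ["UNet2DModel"]  # illustrative export list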
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = '''cvt'''
def __init__( self , a_=3 , a_=[7, 3, 3] , a_=[4, 2, 2] , a_=[2, 1, 1] , a_=[64, 192, 384] , a_=[1, 3, 6] , a_=[1, 2, 10] , a_=[4.0, 4.0, 4.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.1] , a_=[True, True, True] , a_=[False, False, True] , a_=["dw_bn", "dw_bn", "dw_bn"] , a_=[3, 3, 3] , a_=[1, 1, 1] , a_=[2, 2, 2] , a_=[1, 1, 1] , a_=[1, 1, 1] , a_=0.02 , a_=1E-12 , **a_ , ):
super().__init__(**a_ )
lowerCamelCase_ : Optional[Any] = num_channels
lowerCamelCase_ : str = patch_sizes
lowerCamelCase_ : List[Any] = patch_stride
lowerCamelCase_ : str = patch_padding
lowerCamelCase_ : str = embed_dim
lowerCamelCase_ : Union[str, Any] = num_heads
lowerCamelCase_ : Optional[Any] = depth
lowerCamelCase_ : int = mlp_ratio
lowerCamelCase_ : Union[str, Any] = attention_drop_rate
lowerCamelCase_ : Optional[Any] = drop_rate
lowerCamelCase_ : Optional[int] = drop_path_rate
lowerCamelCase_ : Union[str, Any] = qkv_bias
lowerCamelCase_ : int = cls_token
lowerCamelCase_ : int = qkv_projection_method
lowerCamelCase_ : int = kernel_qkv
lowerCamelCase_ : Optional[Any] = padding_kv
lowerCamelCase_ : Optional[int] = stride_kv
lowerCamelCase_ : Optional[int] = padding_q
lowerCamelCase_ : List[Any] = stride_q
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : int = layer_norm_eps
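# Hedged usage sketch for the configuration above, assuming the standard
# transformers export names (CvtConfig/CvtModel):
#
#     from transformers import CvtConfig, CvtModel
#     config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10])
#     model = CvtModel(config)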
| 73 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ = logging.get_logger(__name__)
@add_end_docstrings(
_UpperCamelCase, r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''', )
class lowerCAmelCase__ ( _UpperCamelCase ):
"""simple docstring"""
def _UpperCamelCase ( self , a_ ):
if self.framework == "tf":
lowerCamelCase_ : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCamelCase_ : Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Union[str, Any] = self.get_masked_index(_UpperCAmelCase )
lowerCamelCase_ : List[str] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _UpperCamelCase ( self , a_ ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_UpperCAmelCase )
def _UpperCamelCase ( self , a_ , a_=None , **a_ ):
if return_tensors is None:
lowerCamelCase_ : str = self.framework
lowerCamelCase_ : List[str] = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.ensure_exactly_one_mask_token(_UpperCAmelCase )
return model_inputs
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[int] = self.model(**_UpperCAmelCase )
lowerCamelCase_ : Optional[int] = model_inputs['''input_ids''']
return model_outputs
def _UpperCamelCase ( self , a_ , a_=5 , a_=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCamelCase_ : int = target_ids.shape[0]
lowerCamelCase_ : Optional[int] = model_outputs['''input_ids'''][0]
lowerCamelCase_ : int = model_outputs['''logits''']
if self.framework == "tf":
lowerCamelCase_ : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCamelCase_ : Tuple = outputs.numpy()
lowerCamelCase_ : Dict = outputs[0, masked_index, :]
lowerCamelCase_ : List[str] = stable_softmax(_UpperCAmelCase , axis=-1 )
if target_ids is not None:
lowerCamelCase_ : Any = tf.gather_nd(tf.squeeze(_UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCamelCase_ : List[str] = tf.expand_dims(_UpperCAmelCase , 0 )
lowerCamelCase_ : int = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase )
            lowerCamelCase_ ,lowerCamelCase_ : str = topk.values.numpy(), topk.indices.numpy()
else:
lowerCamelCase_ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCamelCase_ : Any = outputs[0, masked_index, :]
lowerCamelCase_ : int = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCamelCase_ : str = probs[..., target_ids]
                lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = probs.topk(_UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = []
lowerCamelCase_ : Dict = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCamelCase_ : str = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCamelCase_ : List[Any] = input_ids.numpy().copy()
if target_ids is not None:
lowerCamelCase_ : str = target_ids[p].tolist()
lowerCamelCase_ : Tuple = p
# Filter padding out:
lowerCamelCase_ : Dict = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCamelCase_ : str = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowerCamelCase_ : Dict = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
if single_mask:
return result[0]
return result
def _UpperCamelCase ( self , a_ , a_=None ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase_ : Any = [targets]
try:
lowerCamelCase_ : Dict = self.tokenizer.get_vocab()
except Exception:
lowerCamelCase_ : int = {}
lowerCamelCase_ : List[Any] = []
for target in targets:
lowerCamelCase_ : Optional[Any] = vocab.get(_UpperCAmelCase , _UpperCAmelCase )
if id_ is None:
lowerCamelCase_ : Any = self.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , max_length=1 , truncation=_UpperCAmelCase , )['''input_ids''']
if len(_UpperCAmelCase ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
"We cannot replace it with anything meaningful, ignoring it" )
continue
lowerCamelCase_ : Optional[Any] = input_ids[0]
                # XXX: if users hit this code path, tokenization becomes pretty slow;
                # the warning below nudges them to fix the input for faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
lowerCamelCase_ : Tuple = list(set(_UpperCAmelCase ) )
if len(_UpperCAmelCase ) == 0:
raise ValueError("At least one target must be provided when passed." )
lowerCamelCase_ : str = np.array(_UpperCAmelCase )
return target_ids
def _UpperCamelCase ( self , a_=None , a_=None ):
lowerCamelCase_ : Union[str, Any] = {}
if targets is not None:
lowerCamelCase_ : Any = self.get_target_ids(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_ : int = target_ids
if top_k is not None:
lowerCamelCase_ : Tuple = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self , a_ , *a_ , **a_ ):
lowerCamelCase_ : List[Any] = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1:
return outputs[0]
return outputs
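# Hedged usage sketch for the pipeline above via the public `pipeline` factory
# (model name illustrative):
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="bert-base-uncased")
#     unmasker("Paris is the [MASK] of France.", top_k=2)
#     # -> list of {"score", "token", "token_str", "sequence"} dicts
#     unmasker("Paris is the [MASK] of France.", targets=["capital", "city"])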
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
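# A simplified, standalone sketch of the lazy-module idea used above: attribute
# access triggers the real submodule import on first use. This is illustrative,
# not the actual transformers `_LazyModule` implementation:
import importlib
import types


class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        # Only called for attributes not found normally; resolve lazily.
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")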
| 73 | 0 |
import torch
from diffusers import DiffusionPipeline
class lowerCAmelCase__ ( _a ):
"""simple docstring"""
def __init__( self , a_ , a_ ):
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
def __call__( self ):
lowerCamelCase_ : str = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCamelCase_ : str = 1
lowerCamelCase_ : Any = self.unet(_A , _A ).sample
lowerCamelCase_ : Dict = self.scheduler.step(_A , _A , _A ).prev_sample
lowerCamelCase_ : Optional[Any] = scheduler_output - scheduler_output + torch.ones_like(_A )
return result
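# Hedged usage sketch (class and variable names illustrative): with a UNet2DModel
# and any scheduler registered, the one-step pipeline above takes no arguments
# and, by construction, returns a tensor of ones shaped like the unet's sample:
#
#     pipe = OneStepPipeline(unet=unet, scheduler=scheduler)
#     result = pipe()  # equals torch.ones_like(scheduler_output)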
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
__UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , a_ , a_ ):
super().__init__(a_ , a_ )
lowerCamelCase_ : Optional[Any] = self.feature_extractor
lowerCamelCase_ : Optional[int] = False
def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )
def __call__( self , *a_ , **a_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a_ , **a_ )
lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
if len(a_ ) > 0:
lowerCamelCase_ : int = args[0]
lowerCamelCase_ : str = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
if audio is not None:
lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCamelCase_ : Dict = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
lowerCamelCase_ : int = audio_inputs["padding_mask"]
return inputs
def _UpperCamelCase ( self , *a_ , **a_ ):
lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
if len(a_ ) > 0:
lowerCamelCase_ : Optional[int] = args[0]
lowerCamelCase_ : Optional[Any] = args[1:]
if audio_values is not None:
return self._decode_audio(a_ , padding_mask=a_ )
else:
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.decode(*a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Any = to_numpy(a_ )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape
if padding_mask is None:
return list(a_ )
lowerCamelCase_ : Tuple = to_numpy(a_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )
lowerCamelCase_ : str = audio_values.tolist()
for i in range(a_ ):
lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
return audio_values
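# Hedged usage sketch: pairing EncodecFeatureExtractor with a T5 tokenizer
# matches MusicGen-style processors (variable names illustrative):
#
#     inputs = processor(text=["80s pop track"], audio=waveform, sampling_rate=32000, return_tensors="pt")
#     decoded = processor.batch_decode(audio_values, padding_mask=inputs["padding_mask"])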
| 73 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self ):
lowerCamelCase_ : List[Any] = []
lowerCamelCase_ : Any = 0
lowerCamelCase_ : int = 0
def _UpperCamelCase ( self ):
return self.head == self.tail
def _UpperCamelCase ( self , a_ ):
self.data.append(__UpperCamelCase )
lowerCamelCase_ : List[str] = self.tail + 1
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.data[self.head]
lowerCamelCase_ : Optional[int] = self.head + 1
return ret
def _UpperCamelCase ( self ):
return self.tail - self.head
def _UpperCamelCase ( self ):
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self , a_ ):
lowerCamelCase_ : Dict = data
lowerCamelCase_ : List[str] = None
lowerCamelCase_ : Tuple = None
lowerCamelCase_ : List[Any] = 1
def _UpperCamelCase ( self ):
return self.data
def _UpperCamelCase ( self ):
return self.left
def _UpperCamelCase ( self ):
return self.right
def _UpperCamelCase ( self ):
return self.height
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : str = data
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[Any] = node
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[Any] = node
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : List[str] = height
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if a > b:
return a
return b
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
print("left rotation node:" , node.get_data())
lowerCamelCase_ : Union[str, Any] = node.get_left()
assert ret is not None
node.set_left(ret.get_right())
ret.set_right(UpperCAmelCase__)
lowerCamelCase_ : int = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
node.set_height(UpperCAmelCase__)
lowerCamelCase_ : Dict = my_max(get_height(ret.get_right()) , get_height(ret.get_left())) + 1
ret.set_height(UpperCAmelCase__)
return ret
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
print("right rotation node:" , node.get_data())
lowerCamelCase_ : Dict = node.get_right()
assert ret is not None
node.set_right(ret.get_left())
ret.set_left(UpperCAmelCase__)
lowerCamelCase_ : int = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
node.set_height(UpperCAmelCase__)
lowerCamelCase_ : int = my_max(get_height(ret.get_right()) , get_height(ret.get_left())) + 1
ret.set_height(UpperCAmelCase__)
return ret
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Dict = node.get_left()
assert left_child is not None
node.set_left(left_rotation(UpperCAmelCase__))
return right_rotation(UpperCAmelCase__)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : List[Any] = node.get_right()
assert right_child is not None
node.set_right(right_rotation(UpperCAmelCase__))
return left_rotation(UpperCAmelCase__)
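# Rebalancing cases handled by insert_node/del_node below:
#   left-left   -> right_rotation(node)
#   left-right  -> lr_rotation(node)  (left-rotate the left child, then right-rotate node)
#   right-right -> left_rotation(node)
#   right-left  -> rl_rotation(node)  (right-rotate the right child, then left-rotate node)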
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if node is None:
return MyNode(UpperCAmelCase__)
if data < node.get_data():
node.set_left(insert_node(node.get_left() , UpperCAmelCase__))
if (
get_height(node.get_left()) - get_height(node.get_right()) == 2
): # an unbalance detected
lowerCamelCase_ : int = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
lowerCamelCase_ : Tuple = right_rotation(UpperCAmelCase__)
else:
lowerCamelCase_ : List[str] = lr_rotation(UpperCAmelCase__)
else:
node.set_right(insert_node(node.get_right() , UpperCAmelCase__))
if get_height(node.get_right()) - get_height(node.get_left()) == 2:
lowerCamelCase_ : List[str] = node.get_right()
assert right_child is not None
if data < right_child.get_data():
lowerCamelCase_ : Tuple = rl_rotation(UpperCAmelCase__)
else:
lowerCamelCase_ : List[Any] = left_rotation(UpperCAmelCase__)
lowerCamelCase_ : List[Any] = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
node.set_height(UpperCAmelCase__)
return node
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
while True:
lowerCamelCase_ : Any = root.get_right()
if right_child is None:
break
lowerCamelCase_ : Optional[Any] = right_child
return root.get_data()
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
while True:
lowerCamelCase_ : str = root.get_left()
if left_child is None:
break
lowerCamelCase_ : List[Any] = left_child
return root.get_data()
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : List[Any] = root.get_left()
lowerCamelCase_ : Optional[Any] = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
lowerCamelCase_ : Any = get_left_most(UpperCAmelCase__)
root.set_data(UpperCAmelCase__)
root.set_right(del_node(UpperCAmelCase__ , UpperCAmelCase__))
elif left_child is not None:
lowerCamelCase_ : Any = left_child
elif right_child is not None:
lowerCamelCase_ : Any = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data")
return root
else:
root.set_left(del_node(UpperCAmelCase__ , UpperCAmelCase__))
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(UpperCAmelCase__ , UpperCAmelCase__))
if get_height(UpperCAmelCase__) - get_height(UpperCAmelCase__) == 2:
assert right_child is not None
if get_height(right_child.get_right()) > get_height(right_child.get_left()):
lowerCamelCase_ : Dict = left_rotation(UpperCAmelCase__)
else:
lowerCamelCase_ : Union[str, Any] = rl_rotation(UpperCAmelCase__)
elif get_height(UpperCAmelCase__) - get_height(UpperCAmelCase__) == -2:
assert left_child is not None
if get_height(left_child.get_left()) > get_height(left_child.get_right()):
lowerCamelCase_ : Optional[int] = right_rotation(UpperCAmelCase__)
else:
lowerCamelCase_ : Optional[Any] = lr_rotation(UpperCAmelCase__)
lowerCamelCase_ : Any = my_max(get_height(root.get_right()) , get_height(root.get_left())) + 1
root.set_height(UpperCAmelCase__)
return root
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self ):
lowerCamelCase_ : Optional[int] = None
def _UpperCamelCase ( self ):
return get_height(self.root )
def _UpperCamelCase ( self , a_ ):
print("insert:" + str(__UpperCamelCase ) )
lowerCamelCase_ : int = insert_node(self.root , __UpperCamelCase )
def _UpperCamelCase ( self , a_ ):
print("delete:" + str(__UpperCamelCase ) )
if self.root is None:
print("Tree is empty!" )
return
lowerCamelCase_ : Optional[Any] = del_node(self.root , __UpperCamelCase )
    def __str__( self , ): # a level-order traversal gives a more intuitive look at the tree
lowerCamelCase_ : Tuple = ""
lowerCamelCase_ : List[Any] = MyQueue()
q.push(self.root )
lowerCamelCase_ : Any = self.get_height()
if layer == 0:
return output
lowerCamelCase_ : Optional[Any] = 0
while not q.is_empty():
lowerCamelCase_ : Optional[Any] = q.pop()
lowerCamelCase_ : List[str] = " " * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__UpperCamelCase )
q.push(__UpperCamelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
lowerCamelCase_ : Tuple = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , __UpperCamelCase ) - 1:
lowerCamelCase_ : Dict = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_snake_case = AVLtree()
_snake_case = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 714 |
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Return the decimal part of `number`; when `digit_amount` > 0, round it to that many places.'''
if digit_amount > 0:
return round(number - int(lowerCAmelCase_) , lowerCAmelCase_)
return number - int(lowerCAmelCase_)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
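    # Note: outputs follow IEEE-754 binary floating point, so a call such as
    # decimal_isolate(35.345, 2) reflects how 35.345 - 35 is actually
    # represented rather than an exact decimal 0.345.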
| 73 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__magic_name__ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=400 , a_=True , a_=None , a_=True , ):
lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18}
lowerCamelCase_ : str = parent
lowerCamelCase_ : str = batch_size
lowerCamelCase_ : Tuple = num_channels
lowerCamelCase_ : Optional[int] = image_size
lowerCamelCase_ : List[str] = min_resolution
lowerCamelCase_ : Tuple = max_resolution
lowerCamelCase_ : Tuple = do_resize
lowerCamelCase_ : Dict = size
lowerCamelCase_ : List[str] = apply_ocr
def _UpperCamelCase ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# with apply_OCR = True
lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
# with apply_OCR = False
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class lowerCAmelCase__ ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , a_=None , a_=None , *a_ , **a_ ):
super().__init__(*a_ , **a_ )
if config is None:
assert isinstance(self.model , a_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
lowerCamelCase_ : Optional[Any] = self.model.config
else:
lowerCamelCase_ : Tuple = config
lowerCamelCase_ : List[str] = data_args
lowerCamelCase_ : Dict = self.config.tgt_vocab_size if isinstance(self.config , a_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
lowerCamelCase_ : str = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowerCamelCase_ : Tuple = label_smoothed_nll_loss
def _UpperCamelCase ( self , a_ ):
if self.optimizer is None:
lowerCamelCase_ : Tuple = ["bias", "LayerNorm.weight"]
lowerCamelCase_ : List[str] = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
lowerCamelCase_ : Tuple = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowerCamelCase_ : Optional[Any] = Adafactor
lowerCamelCase_ : Any = {"scale_parameter": False, "relative_step": False}
else:
lowerCamelCase_ : Union[str, Any] = AdamW
lowerCamelCase_ : Union[str, Any] = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
lowerCamelCase_ : List[Any] = self.args.learning_rate
if self.sharded_ddp:
lowerCamelCase_ : int = OSS(
params=a_ , optim=a_ , **a_ , )
else:
lowerCamelCase_ : Union[str, Any] = optimizer_cls(a_ , **a_ )
if self.lr_scheduler is None:
lowerCamelCase_ : Optional[Any] = self._get_lr_scheduler(a_ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowerCamelCase_ : Any = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowerCamelCase_ : List[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
lowerCamelCase_ : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=a_ )
return scheduler
def _UpperCamelCase ( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _UpperCamelCase ( self , a_ , a_ , a_ ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowerCamelCase_ : Any = model(**a_ , use_cache=a_ )[0]
lowerCamelCase_ : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = model(**a_ , labels=a_ , use_cache=a_ )[:2]
else:
# compute label smoothed loss
lowerCamelCase_ : Tuple = model(**a_ , use_cache=a_ )[0]
lowerCamelCase_ : int = torch.nn.functional.log_softmax(a_ , dim=-1 )
lowerCamelCase_ ,lowerCamelCase_ : List[Any] = self.loss_fn(a_ , a_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _UpperCamelCase ( self , a_ , a_ ):
lowerCamelCase_ : Optional[int] = inputs.pop("labels" )
lowerCamelCase_ ,lowerCamelCase_ : Dict = self._compute_loss(a_ , a_ , a_ )
return loss
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ = None , ):
lowerCamelCase_ : Tuple = self._prepare_inputs(a_ )
lowerCamelCase_ : List[str] = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowerCamelCase_ : Tuple = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **a_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowerCamelCase_ : List[str] = self._pad_tensors_to_max_len(a_ , gen_kwargs["max_length"] )
lowerCamelCase_ : Dict = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
lowerCamelCase_ ,lowerCamelCase_ : List[str] = self._compute_loss(a_ , a_ , a_ )
lowerCamelCase_ : List[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowerCamelCase_ : Any = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowerCamelCase_ : Any = self._pad_tensors_to_max_len(a_ , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _UpperCamelCase ( self , a_ , a_ ):
lowerCamelCase_ : Optional[int] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F""" padded to `max_length`={max_length}""" )
lowerCamelCase_ : List[Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
lowerCamelCase_ : Optional[Any] = tensor
return padded_tensor
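# A minimal sketch of label-smoothed NLL loss in the spirit of the
# `label_smoothed_nll_loss` helper imported dynamically above (an
# assumption-laden sketch, not the exact examples/seq2seq implementation;
# it relies on the `torch` import at the top of this file):
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index):
    """lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids."""
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # per-token NLL
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform-prior smoothing term
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()
    return loss, nll_loss.sum()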
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''luke'''
def __init__( self , a_=5_0267 , a_=50_0000 , a_=768 , a_=256 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=True , a_=None , a_=1 , a_=0 , a_=2 , **a_ , ):
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
lowerCamelCase_ : Tuple = vocab_size
lowerCamelCase_ : Optional[int] = entity_vocab_size
lowerCamelCase_ : Any = hidden_size
lowerCamelCase_ : Dict = entity_emb_size
lowerCamelCase_ : List[Any] = num_hidden_layers
lowerCamelCase_ : int = num_attention_heads
lowerCamelCase_ : Union[str, Any] = hidden_act
lowerCamelCase_ : Tuple = intermediate_size
lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
lowerCamelCase_ : Any = attention_probs_dropout_prob
lowerCamelCase_ : Optional[Any] = max_position_embeddings
lowerCamelCase_ : str = type_vocab_size
lowerCamelCase_ : int = initializer_range
lowerCamelCase_ : List[Any] = layer_norm_eps
lowerCamelCase_ : Optional[int] = use_entity_aware_attention
lowerCamelCase_ : str = classifier_dropout
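# Hedged usage sketch, assuming the standard transformers exports:
#
#     from transformers import LukeConfig, LukeModel
#     config = LukeConfig(entity_vocab_size=500000, use_entity_aware_attention=True)
#     model = LukeModel(config)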
| 73 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCAmelCase__ ( __lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : int = "realm"
def __init__( self , a_=3_0522 , a_=768 , a_=128 , a_=12 , a_=12 , a_=8 , a_=3072 , a_="gelu_new" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=256 , a_=10 , a_=1E-3 , a_=5 , a_=320 , a_=1335_3718 , a_=5000 , a_=1 , a_=0 , a_=2 , **a_ , ):
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
# Common config
lowerCamelCase_ : int = vocab_size
lowerCamelCase_ : Optional[int] = max_position_embeddings
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : Union[str, Any] = retriever_proj_size
lowerCamelCase_ : Dict = num_hidden_layers
lowerCamelCase_ : str = num_attention_heads
lowerCamelCase_ : int = num_candidates
lowerCamelCase_ : Any = intermediate_size
lowerCamelCase_ : Tuple = hidden_act
lowerCamelCase_ : List[str] = hidden_dropout_prob
lowerCamelCase_ : str = attention_probs_dropout_prob
lowerCamelCase_ : Union[str, Any] = initializer_range
lowerCamelCase_ : List[str] = type_vocab_size
lowerCamelCase_ : str = layer_norm_eps
# Reader config
lowerCamelCase_ : Dict = span_hidden_size
lowerCamelCase_ : int = max_span_width
lowerCamelCase_ : str = reader_layer_norm_eps
lowerCamelCase_ : int = reader_beam_size
lowerCamelCase_ : Optional[Any] = reader_seq_len
# Retrieval config
lowerCamelCase_ : Dict = num_block_records
lowerCamelCase_ : Optional[Any] = searcher_beam_size
| 717 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : Optional[datasets.Features] = None
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , ):
    '''Yield (example_id, row_dict) pairs from the dataframe, visiting partitions in `partition_order`.'''
import pyspark
def generate_fn():
lowerCamelCase_ : Dict = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id"))
for partition_id in partition_order:
lowerCamelCase_ : Dict = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id")
lowerCamelCase_ : Dict = partition_df.collect()
lowerCamelCase_ : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class lowerCAmelCase__ ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self , a_ , a_=None , ):
lowerCamelCase_ : Dict = df
lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a_ )
return SparkExamplesIterable(self.df , partition_order=a_ )
def _UpperCamelCase ( self , a_ , a_ ):
lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ )
return SparkExamplesIterable(self.df , partition_order=a_ )
@property
def _UpperCamelCase ( self ):
return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__UpperCAmelCase : Any = SparkConfig
def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
import pyspark
lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase_ : Optional[Any] = df
lowerCamelCase_ : List[Any] = working_dir
super().__init__(
cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
def _UpperCamelCase ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(a_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a_ )
            lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a_ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase_ : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _UpperCamelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _UpperCamelCase ( self , a_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _UpperCamelCase ( self , a_ ):
import pyspark
def get_arrow_batch_size(a_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
lowerCamelCase_ : str = self.df.count()
lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase_ : Any = (
self.df.limit(a_ )
.repartition(1 )
.mapInArrow(a_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
lowerCamelCase_ : int = self.df.repartition(a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
import pyspark
lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
lowerCamelCase_ : Optional[Any] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase_ : int = self.config.features
lowerCamelCase_ : Any = self._writer_batch_size
lowerCamelCase_ : Tuple = self._fs.storage_options
def write_arrow(a_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCamelCase_ : Optional[int] = next(a_ , a_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Optional[int] = writer_class(
features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(a_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
lowerCamelCase_ : List[str] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(a_ )
if writer._num_bytes > 0:
lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a_ ) ):
lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
shutil.move(a_ , a_ )
lowerCamelCase_ : int = (
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
self._validate_cache_dir()
lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a_ )
lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
lowerCamelCase_ : int = path_join(self._output_dir , a_ )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
            lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a_ )
lowerCamelCase_ : Dict = total_num_examples
lowerCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
lowerCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a_ , a_ , a_ , ):
rename(
a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
else:
# don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
    def _get_examples_iterable_for_split(self, split_generator):
return SparkExamplesIterable(self.df )
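# Editorial note (hedged illustration; the concrete names and counts below are
# invented): each Spark task first writes shards under the "-TTTTT-SSSSS-of-NNNNN"
# placeholder pattern, e.g. "dataset-train-00003-00000-of-NNNNN.arrow" for task 3,
# shard 0. Once every task has reported its shard count, the rename step above
# rewrites that to a gap-free global name such as "dataset-train-00007-of-00012.arrow".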
| 73 | 0 |
import os
def __magic_name__ ( lowerCAmelCase_ = "matrix.txt"):
'''simple docstring'''
with open(os.path.join(os.path.dirname(snake_case_) , snake_case_)) as in_file:
lowerCamelCase_ : Dict = in_file.read()
lowerCamelCase_ : Optional[int] = [[int(snake_case_) for cell in row.split(",")] for row in data.strip().splitlines()]
lowerCamelCase_ : int = [[0 for cell in row] for row in grid]
lowerCamelCase_ : Dict = len(grid[0])
lowerCamelCase_ : Dict = [[0 for i in range(snake_case_)] for j in range(snake_case_)]
lowerCamelCase_ : Optional[int] = grid[0][0]
for i in range(1 , snake_case_):
lowerCamelCase_ : Optional[int] = grid[0][i] + dp[0][i - 1]
for i in range(1 , snake_case_):
lowerCamelCase_ : List[Any] = grid[i][0] + dp[i - 1][0]
for i in range(1 , snake_case_):
for j in range(1 , snake_case_):
lowerCamelCase_ : Dict = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1])
return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
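# Worked check of the recurrence dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
# on the 2x2 grid [[1, 3], [2, 4]]: dp == [[1, 4], [3, 7]], so the cheapest path
# is 1 -> 2 -> 4 with cost 7 (versus 1 -> 3 -> 4 with cost 8).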
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax every edge out of `v` for one direction of the bidirectional search."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra: run Dijkstra from both endpoints and stop once the
    two frontiers can no longer improve on the best meeting point found.
    Returns -1 if the destination is not reachable.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # Standard termination: once the two settled frontiers together cost at
        # least as much as the best meeting point, no shorter path can exist.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
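# Why the doctest above yields 3: in graph_fwd, E -> G -> F costs 2 + 1 = 3 while
# E -> B -> C -> D -> F costs 1 + 1 + 1 + 1 = 4; graph_bwd is graph_fwd with every
# edge reversed, which is what lets the backward search expand toward the source.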
| 73 | 0 |
def solution(limit: int = 50000000) -> int:
    """Count numbers below `limit` expressible as p**2 + q**3 + r**4 with
    p, q, r prime (Project Euler 87)."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    # Sieve of Eratosthenes up to sqrt(limit - 24), the largest useful square base.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_1 in sorted(primes):  # sorted so the early `break`s below are valid
        square = prime_1 * prime_1
        for prime_2 in sorted(primes):
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:  # 16 == 2**4, the smallest fourth power
                break
            for prime_3 in sorted(primes):
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f'''{solution() = }''')
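# Worked check: the smallest prime-power triple is 28 = 2**2 + 2**3 + 2**4,
# so solution(29) == 1 while solution(28) == 0.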
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig):
    """Configuration class for the Salesforce CTRL model."""
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
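# Minimal usage sketch for the config above; via `attribute_map` the generic
# names resolve to the CTRL-specific ones:
#
#   config = CTRLConfig(n_layer=12, n_head=8)
#   assert config.hidden_size == config.n_embd == 1280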
| 73 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["""pixel_values"""]
def __init__( self , a_ = True , a_ = None , a_ = 0.9 , a_ = PILImageResampling.BICUBIC , a_ = True , a_ = None , a_ = 1 / 255 , a_ = True , a_ = True , a_ = None , a_ = None , **a_ , ):
super().__init__(**a_ )
lowerCamelCase_ : int = size if size is not None else {"shortest_edge": 224}
lowerCamelCase_ : List[Any] = get_size_dict(a_ , default_to_square=a_ )
lowerCamelCase_ : Optional[int] = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCamelCase_ : Dict = get_size_dict(a_ , param_name="crop_size" )
lowerCamelCase_ : str = do_resize
lowerCamelCase_ : Optional[Any] = size
lowerCamelCase_ : Optional[int] = crop_pct
lowerCamelCase_ : List[Any] = resample
lowerCamelCase_ : Optional[Any] = do_center_crop
lowerCamelCase_ : Optional[int] = crop_size
lowerCamelCase_ : Optional[Any] = do_rescale
lowerCamelCase_ : Dict = rescale_factor
lowerCamelCase_ : Optional[Any] = do_normalize
lowerCamelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCamelCase ( self , a_ , a_ , a_ = None , a_ = PILImageResampling.BICUBIC , a_ = None , **a_ , ):
lowerCamelCase_ : Optional[int] = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCamelCase_ : List[Any] = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCamelCase_ : Optional[int] = int(size["height"] / crop_pct )
else:
lowerCamelCase_ : Union[str, Any] = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(a_ ) )
lowerCamelCase_ : Optional[Any] = get_resize_output_image_size(a_ , size=a_ , default_to_square=a_ )
else:
if "shortest_edge" in size:
lowerCamelCase_ : Optional[int] = get_resize_output_image_size(a_ , size=size["shortest_edge"] , default_to_square=a_ )
elif "height" in size and "width" in size:
lowerCamelCase_ : int = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(a_ ) )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ = None , **a_ , ):
lowerCamelCase_ : List[Any] = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ = None , **a_ , ):
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ = None , **a_ , ):
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ):
lowerCamelCase_ : int = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ : List[str] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase_ : List[Any] = resample if resample is not None else self.resample
lowerCamelCase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ : Any = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ : Dict = image_std if image_std is not None else self.image_std
lowerCamelCase_ : List[Any] = size if size is not None else self.size
lowerCamelCase_ : Any = get_size_dict(a_ , default_to_square=a_ )
lowerCamelCase_ : Dict = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ : Union[str, Any] = get_size_dict(a_ , param_name="crop_size" )
lowerCamelCase_ : str = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCamelCase_ : Optional[int] = [to_numpy_array(a_ ) for image in images]
if do_resize:
lowerCamelCase_ : Union[str, Any] = [self.resize(image=a_ , size=a_ , crop_pct=a_ , resample=a_ ) for image in images]
if do_center_crop:
lowerCamelCase_ : Union[str, Any] = [self.center_crop(image=a_ , size=a_ ) for image in images]
if do_rescale:
lowerCamelCase_ : int = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_normalize:
lowerCamelCase_ : Any = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images]
lowerCamelCase_ : Dict = [to_channel_dimension_format(a_ , a_ ) for image in images]
lowerCamelCase_ : Union[str, Any] = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
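# Hedged usage sketch (the class and method names are obfuscated in this dump, so
# `ImageProcessor` is a stand-in). With the defaults, a 256x256 input is resized so
# its short edge becomes int(224 / 0.9) == 248 px, center-cropped to 224x224,
# rescaled by 1/255, and normalized with the ImageNet default mean/std:
#
#   processor = ImageProcessor()
#   batch = processor(images=pil_image, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224)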
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCamelCase )}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
__UpperCAmelCase : str = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
def _UpperCamelCase ( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(default=__lowerCamelCase, metadata={'''help''': '''The input training data file (a text file).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__UpperCAmelCase : Optional[int] = field(
default=5, metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
__UpperCAmelCase : float = field(
default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
}, )
def _UpperCamelCase ( self ):
if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    '''Attach per-example whole-word-masking indices to `dataset` as a `chinese_ref` column.'''
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
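# Hedged note on the ref-file format consumed above (example lines invented):
# one JSON-encoded list per training example, holding the token positions that
# continue a multi-character Chinese word, so DataCollatorForWholeWordMask can
# mask the whole word as one unit, e.g.
#   [2, 3]
#   []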
def main():
    '''Parse arguments, build the datasets, and run MLM training with whole word masking.'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", )
            datasets["train"] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", )
else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""")
config.update_from_string(model_args.config_overrides)
logger.info(F"""New config: {config}""")
    tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name.")
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
else:
lowerCamelCase_ : Dict = datasets["validation"].column_names
lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_):
# Remove empty lines
lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file)
    # If we have ref files, we must keep the Trainer from dropping the `chinese_ref` column.
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
return results
def _mp_fn(index):
    '''Entry point for xla_spawn (TPU); `index` is the spawned process index.'''
    main()
if __name__ == "__main__":
main()
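# Hedged invocation sketch (the script and file names are placeholders; the flags
# map onto the dataclass fields defined above):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.txt \
#       --do_train \
#       --output_dir ./mlm-wwm-out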
| 73 | 0 |
from __future__ import annotations
import queue
class TreeNode:
    """A binary-tree node holding an integer payload."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree in level order; entering 'n' stops input."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: the loop above always returns")
def pre_order(node: TreeNode) -> None:
    """Root, then left subtree, then right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
print(node.data , end=",")
pre_order(node.left)
pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Left subtree, then root, then right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
in_order(node.left)
print(node.data , end=",")
in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Left subtree, then right subtree, then root."""
    if not isinstance(node, TreeNode) or not node:
        return
post_order(node.left)
post_order(node.right)
print(node.data , end=",")
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a FIFO queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
print(node_dequeued.data , end=",")
if node_dequeued.left:
q.put(node_dequeued.left)
if node_dequeued.right:
q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal that prints each level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
print(node_dequeued.data , end=",")
if node_dequeued.left:
list_.append(node_dequeued.left)
if node_dequeued.right:
list_.append(node_dequeued.right)
print()
for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal with an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal with an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1: list[TreeNode] = []
    stack2: list[TreeNode] = []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def __magic_name__ ( lowerCAmelCase_ = "" , lowerCAmelCase_=50 , lowerCAmelCase_="*"):
'''simple docstring'''
if not s:
return "\n" + width * char
lowerCamelCase_ ,lowerCamelCase_ : Tuple = divmod(width - len(lowerCAmelCase_) - 2 , 2)
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
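    # Worked example: for the tree with root 1, children 2 and 3, and 2's
    # children 4 and 5, the traversals above print
    #   pre-order:  1,2,4,5,3,   in-order:    4,2,5,1,3,
    #   post-order: 4,5,2,3,1,   level order: 1,2,3,4,5,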
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
# setable values
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _UpperCamelCase ( cls ):
return cls()
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : KarrasVeSchedulerState
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
@property
def _UpperCamelCase ( self ):
return True
@register_to_config
def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
        pass  # all hyperparameters are captured by @register_to_config; nothing else to set up
def _UpperCamelCase ( self ):
return KarrasVeSchedulerState.create()
def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
lowerCamelCase_ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
lowerCamelCase_ : List[str] = sigma + gamma * sigma
lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
raise NotImplementedError()
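# Worked sketch of the stochastic churn step above (assumed values): with
# sigma = 10 and gamma = 0.1, sigma_hat = 10 + 0.1 * 10 = 11, and the sample is
# re-noised by (sigma_hat**2 - sigma**2) ** 0.5 = 21 ** 0.5 ≈ 4.58 times eps,
# exactly the extra noise needed to move from noise level 10 up to 11.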
| 73 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Combines a CLAP feature extractor and a Roberta tokenizer into one processor."""
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
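# Hedged usage sketch (the checkpoint id is a placeholder):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[waveform],
#                      sampling_rate=48000, return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus the feature
#   #    extractor's input_features, merged into one BatchEncoding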
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__UpperCAmelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : List[str] = frozenset([] )
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
lowerCamelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
lowerCamelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCamelCase_ : Dict = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
        mask = floats_tensor((1, 16, 16), rng=random.Random(a_)).to(a_)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(a_)).to(a_)
        if str(a_).startswith("mps"):
            generator = torch.manual_seed(a_)
        else:
            generator = torch.Generator(device=a_).manual_seed(a_)
        inputs = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(a_)).to(a_)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(a_).startswith("mps"):
            generator = torch.manual_seed(a_)
        else:
            generator = torch.Generator(device=a_).manual_seed(a_)
        inputs = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(a_)).to(a_)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(a_).startswith("mps"):
            generator = torch.manual_seed(a_)
        else:
            generator = torch.Generator(device=a_).manual_seed(a_)
        inputs = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self ):
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : int = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a_ , a_ , a_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
lowerCamelCase_ : int = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
self.assertLess(a_ , 1E-4 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
lowerCamelCase_ : int = pipe.generate_mask(**a_ )
lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase_ : List[str] = np.array([0] * 9 )
lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
lowerCamelCase_ : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Dict = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
def _UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : str = pipe.invert(**a_ ).images
lowerCamelCase_ : int = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Union[str, Any] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _UpperCamelCase ( cls ):
lowerCamelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
lowerCamelCase_ : List[Any] = raw_image
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = torch.manual_seed(0 )
lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : str = "a bowl of fruit"
lowerCamelCase_ : Optional[int] = "a bowl of pears"
lowerCamelCase_ : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
lowerCamelCase_ : List[str] = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = "a bowl of fruit"
lowerCamelCase_ : Dict = "a bowl of pears"
lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
lowerCamelCase_ : Any = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self , a_ , a_ = 13 , a_ = 64 , a_ = 2 , a_ = 3 , a_ = 3 , a_ = True , a_ = True , a_ = 128 , a_=[16, 32, 64, 128] , a_ = 7 , a_ = 4 , a_ = 37 , a_ = "gelu" , a_ = 0.1 , a_ = 0.1 , a_ = 10 , a_ = 0.02 , a_ = 2 , a_ = 1 , a_ = 128 , a_ = [2, 2, 2, 2] , a_ = 2 , a_ = 2 , ):
lowerCamelCase_ : List[str] = parent
lowerCamelCase_ : Dict = batch_size
lowerCamelCase_ : Optional[int] = image_size
lowerCamelCase_ : Optional[Any] = patch_size
lowerCamelCase_ : List[str] = num_channels
lowerCamelCase_ : Tuple = is_training
lowerCamelCase_ : Optional[Any] = use_labels
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : List[Any] = num_hidden_layers
lowerCamelCase_ : Tuple = num_attention_heads
lowerCamelCase_ : List[Any] = intermediate_size
lowerCamelCase_ : Any = hidden_act
lowerCamelCase_ : Dict = hidden_dropout_prob
lowerCamelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase_ : List[Any] = type_sequence_label_size
lowerCamelCase_ : Optional[Any] = initializer_range
lowerCamelCase_ : Any = encoder_stride
lowerCamelCase_ : str = num_attention_outputs
lowerCamelCase_ : str = embed_dim
lowerCamelCase_ : Dict = embed_dim + 1
lowerCamelCase_ : str = resolution
lowerCamelCase_ : int = depths
lowerCamelCase_ : Optional[Any] = hidden_sizes
lowerCamelCase_ : str = dim
lowerCamelCase_ : Any = mlp_expansion_ratio
def _UpperCamelCase ( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _UpperCamelCase ( self , a_ , a_ , a_ ):
lowerCamelCase_ : Union[str, Any] = TFEfficientFormerModel(config=a_ )
lowerCamelCase_ : Optional[int] = model(a_ , training=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a_ , a_ , a_ ):
lowerCamelCase_ : Optional[Any] = self.type_sequence_label_size
lowerCamelCase_ : List[str] = TFEfficientFormerForImageClassification(a_ )
lowerCamelCase_ : Union[str, Any] = model(a_ , labels=a_ , training=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : Tuple = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Dict = False
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = TFEfficientFormerModelTester(self )
lowerCamelCase_ : str = ConfigTester(
self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def _UpperCamelCase ( self ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : str = model_class(a_ )
lowerCamelCase_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Optional[int] = [*signature.parameters.keys()]
lowerCamelCase_ : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def _UpperCamelCase ( self ):
def check_hidden_states_output(a_ , a_ , a_ ):
lowerCamelCase_ : int = model_class(a_ )
lowerCamelCase_ : int = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
lowerCamelCase_ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ : List[Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a_ ) , a_ )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowerCamelCase_ : List[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCamelCase_ : int = seq_length * self.model_tester.chunk_length
else:
lowerCamelCase_ : Union[str, Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCamelCase_ : str = outputs.decoder_hidden_states
self.assertIsInstance(a_ , (list, tuple) )
self.assertEqual(len(a_ ) , a_ )
lowerCamelCase_ : Any = getattr(self.model_tester , "seq_length" , a_ )
lowerCamelCase_ : Union[str, Any] = getattr(self.model_tester , "decoder_seq_length" , a_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_ ,lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[int] = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Tuple = True
check_hidden_states_output(a_ , a_ , a_ )
def _UpperCamelCase ( self , a_ , a_ , a_=False ):
lowerCamelCase_ : List[str] = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : Tuple = TFEfficientFormerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ ,lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : Tuple = getattr(self.model_tester , "seq_length" , a_ )
lowerCamelCase_ : List[Any] = getattr(self.model_tester , "encoder_seq_length" , a_ )
lowerCamelCase_ : Dict = getattr(self.model_tester , "key_length" , a_ )
lowerCamelCase_ : Optional[Any] = getattr(self.model_tester , "chunk_length" , a_ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowerCamelCase_ : Optional[int] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : Dict = True
lowerCamelCase_ : str = model_class(a_ )
lowerCamelCase_ : Dict = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
lowerCamelCase_ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : Any = model_class(a_ )
lowerCamelCase_ : str = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
lowerCamelCase_ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _UpperCamelCase ( self ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCamelCase_ ,lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCamelCase_ : Optional[int] = model_class(a_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCamelCase_ : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=a_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCamelCase_ : List[str] = model(a_ )
self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self ):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
lowerCamelCase_ : List[str] = self.default_image_processor
lowerCamelCase_ : Tuple = prepare_img()
lowerCamelCase_ : str = image_processor(images=a_ , return_tensors="tf" )
# forward pass
lowerCamelCase_ : Any = model(**a_ , training=a_ )
# verify the logits
lowerCamelCase_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
lowerCamelCase_ : Any = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
@slow
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
lowerCamelCase_ : List[str] = self.default_image_processor
lowerCamelCase_ : int = prepare_img()
lowerCamelCase_ : Optional[int] = image_processor(images=a_ , return_tensors="tf" )
# forward pass
lowerCamelCase_ : int = model(**a_ , training=a_ )
# verify the logits
lowerCamelCase_ : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
lowerCamelCase_ : List[str] = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
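# A minimal inference sketch distilled from the two slow tests above
# (illustrative only; the class names and the checkpoint id appear in this
# file, the variable names are assumptions):
# processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
# model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
# inputs = processor(images=prepare_img(), return_tensors="tf")
# logits = model(**inputs, training=False).logits  # shape (1, 1000)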
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = ["a", "b", "c"]
# Defaults to last layer if both are None
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = BackboneMixin()
lowerCamelCase_ : List[Any] = ["a", "b", "c"]
lowerCamelCase_ : Optional[int] = ["a", "c"]
lowerCamelCase_ : Dict = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowerCamelCase_ : Union[str, Any] = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowerCamelCase_ : str = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 73 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , a_ = None , a_ = None , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , **a_ , ):
lowerCamelCase_ : Optional[Any] = path_or_paths
lowerCamelCase_ : List[str] = split if split or isinstance(a_ , a_ ) else "train"
lowerCamelCase_ : Tuple = features
lowerCamelCase_ : List[str] = cache_dir
lowerCamelCase_ : Optional[int] = keep_in_memory
lowerCamelCase_ : Dict = streaming
lowerCamelCase_ : Dict = num_proc
lowerCamelCase_ : Any = kwargs
@abstractmethod
def _UpperCamelCase ( self ):
pass
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , **a_ , ):
lowerCamelCase_ : Any = features
lowerCamelCase_ : Optional[Any] = cache_dir
lowerCamelCase_ : Optional[Any] = keep_in_memory
lowerCamelCase_ : int = streaming
lowerCamelCase_ : Tuple = num_proc
lowerCamelCase_ : Optional[Any] = kwargs
@abstractmethod
def _UpperCamelCase ( self ):
pass
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
__UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
__UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCAmelCase : int = '''default_config.yaml'''
__UpperCAmelCase : Tuple = config_folder / config_file
__UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
__UpperCAmelCase : int = Path('''tests/test_configs''' )
@classmethod
def _UpperCamelCase ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _UpperCamelCase ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=a_ ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''test-tpu'''
__UpperCAmelCase : Tuple = '''us-central1-a'''
__UpperCAmelCase : Tuple = '''ls'''
__UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
__UpperCAmelCase : Dict = '''cd /usr/share'''
__UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
__UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
| 73 | 0 |
import math
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return math.pow(lowerCAmelCase_ , 2) - a
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 2 * x
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : int = 2.0
while start <= a:
lowerCamelCase_ : List[Any] = math.pow(lowerCAmelCase_ , 2)
return start
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ = 9999 , lowerCAmelCase_ = 0.00_00_00_00_00_00_01):
'''simple docstring'''
if a < 0:
raise ValueError("math domain error")
lowerCamelCase_ : List[str] = get_initial_point(lowerCAmelCase_)
for _ in range(lowerCAmelCase_):
lowerCamelCase_ : Tuple = value
lowerCamelCase_ : Optional[int] = value - fx(lowerCAmelCase_ , lowerCAmelCase_) / fx_derivative(lowerCAmelCase_)
if abs(prev_value - value) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
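# Illustrative usage of the Newton iteration above (the de-obfuscated name
# square_root_iterative is an assumption for the last function defined):
# square_root_iterative(2)  -> ~1.4142135623730951 (agrees with math.sqrt(2))
# square_root_iterative(9)  -> ~3.0
# square_root_iterative(-1) -> raises ValueError("math domain error")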
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , a_ , a_ , a_ ):
super().__init__()
self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ):
lowerCamelCase_ : Optional[Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
lowerCamelCase_ : Optional[int] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCamelCase_ : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : Optional[int] = {}
if accepts_eta:
lowerCamelCase_ : Optional[int] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCamelCase_ : Dict = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
lowerCamelCase_ : Optional[Any] = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : List[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# decode the image latents with the VAE
lowerCamelCase_ : str = self.vqvae.decode(a_ ).sample
lowerCamelCase_ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ : Optional[Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
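# Usage sketch for the unconditional VQ-VAE + DDIM pipeline above
# (illustrative; the pipeline class name and the checkpoint id
# "CompVis/ldm-celebahq-256" are assumptions):
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]
# image.save("ldm_sample.png")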
| 73 | 0 |
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(lowerCAmelCase_ , (list, tuple)) or not all(
isinstance(lowerCAmelCase_ , lowerCAmelCase_) for number in numbers):
raise ValueError("numbers must be an iterable of integers")
lowerCamelCase_ : List[str] = numbers[0]
for i in range(1 , len(lowerCAmelCase_)):
# update the maximum and minimum subarray products
lowerCamelCase_ : List[Any] = numbers[i]
if number < 0:
lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = min_till_now, max_till_now
lowerCamelCase_ : Dict = max(lowerCAmelCase_ , max_till_now * number)
lowerCamelCase_ : Any = min(lowerCAmelCase_ , min_till_now * number)
# update the maximum product found till now
lowerCamelCase_ : Optional[int] = max(lowerCAmelCase_ , lowerCAmelCase_)
return max_prod
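# Worked examples for the maximum-product-subarray routine above
# (illustrative; assumes the de-obfuscated name max_product_subarray):
# max_product_subarray([2, 3, -2, 4]) -> 6    (subarray [2, 3])
# max_product_subarray([-2, 0, -1])   -> 0
# max_product_subarray([-4, -3, -2])  -> 12   (subarray [-4, -3])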
| 704 |
import re
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_):
raise ValueError("Invalid Strand")
return dna.translate(dna.maketrans("ATCG" , "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
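# Usage sketch (illustrative; assumes the de-obfuscated name dna):
# dna("ATCGATCG") -> "TAGCTAGC"   (A<->T, C<->G complement)
# dna("ATGCX")    -> raises ValueError("Invalid Strand")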
| 73 | 0 |
import os
from distutils.util import strtobool
def _A ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
for e in env_keys:
lowerCamelCase_ : Any = int(os.environ.get(lowerCAmelCase_ , -1))
if val >= 0:
return val
return default
def _A ( lowerCAmelCase_ , lowerCAmelCase_=False):
'''simple docstring'''
lowerCamelCase_ : List[str] = os.environ.get(lowerCAmelCase_ , str(lowerCAmelCase_))
return strtobool(lowerCAmelCase_) == 1 # As its name indicates `strtobool` actually returns an int...
def _A ( lowerCAmelCase_ , lowerCAmelCase_="no"):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = os.environ.get(lowerCAmelCase_ , str(lowerCAmelCase_))
return value
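# Usage sketch for the three env helpers above (illustrative; in accelerate
# they are named get_int_from_env, parse_flag_from_env and
# parse_choice_from_env -- the names used here are assumptions):
# os.environ["ACCELERATE_DEBUG_MODE"] = "yes"
# parse_flag_from_env("ACCELERATE_DEBUG_MODE")             # -> True
# get_int_from_env(["LOCAL_WORLD_SIZE", "WORLD_SIZE"], 1)  # -> first key set, else 1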
| 705 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)]
return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))]
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1):
'''simple docstring'''
lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_)
lowerCamelCase_ : float = sum(lowerCAmelCase_)
return abs(lowerCAmelCase_) < eps
if __name__ == "__main__":
# Test to check if it works
__magic_name__ = array(
[
polar_force(7_18.4, 1_8_0 - 3_0),
polar_force(8_79.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__magic_name__ = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
__magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
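# Why the check works (illustrative note): for coplanar forces each 2-D cross
# product r_i x F_i reduces to the scalar moment about the origin,
# m_i = x_i * F_yi - y_i * F_xi, so the body is in rotational equilibrium
# exactly when sum(m_i) is numerically zero -- which is the |sum| < eps test
# implemented above.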
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''ClapFeatureExtractor'''
__UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , a_ , a_ ):
super().__init__(a_ , a_ )
def __call__( self , a_=None , a_=None , a_=None , **a_ ):
lowerCamelCase_ : Any = kwargs.pop("sampling_rate" , a_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
lowerCamelCase_ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if audios is not None:
lowerCamelCase_ : List[str] = self.feature_extractor(
a_ , sampling_rate=a_ , return_tensors=a_ , **a_ )
if text is not None and audios is not None:
lowerCamelCase_ : List[str] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.tokenizer.model_input_names
lowerCamelCase_ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
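# Usage sketch (illustrative; "laion/clap-htsat-unfused" is one published
# CLAP checkpoint, and the variable names are assumptions):
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=waveform,
#                    sampling_rate=48_000, return_tensors="pt")
# # -> tokenizer fields (input_ids, attention_mask) plus input_features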
| 73 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : List[str] = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase_)
lowerCamelCase_ : Optional[Any] = parser.add_subparsers(help="accelerate command helpers")
# Register commands
get_config_parser(subparsers=lowerCAmelCase_)
env_command_parser(subparsers=lowerCAmelCase_)
launch_command_parser(subparsers=lowerCAmelCase_)
tpu_command_parser(subparsers=lowerCAmelCase_)
test_command_parser(subparsers=lowerCAmelCase_)
# Let's go
lowerCamelCase_ : Any = parser.parse_args()
if not hasattr(lowerCAmelCase_ , "func"):
parser.print_help()
exit(1)
# Run
args.func(lowerCAmelCase_)
if __name__ == "__main__":
main()
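# Each subparser registered above becomes a subcommand of the console entry
# point (illustrative):
#   accelerate config        # interactive configuration
#   accelerate env           # report the current environment
#   accelerate launch train.py --num_processes 2
#   accelerate test          # sanity-check the saved config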
| 707 |
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_faster()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_fastest()" , setup=lowerCAmelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
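# Usage sketch (illustrative; assumes the de-obfuscated names is_pangram,
# is_pangram_faster and is_pangram_fastest):
# is_pangram()                                      -> True (default sentence)
# is_pangram("My name is Unknown")                  -> False
# is_pangram_fastest("abcdefghijklmnopqrstuvwxyz")  -> True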
| 73 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
lowerCamelCase_ : Dict = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_).raw).convert("RGB")
return image
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Dict = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
for i in range(config.vision_config.num_hidden_layers):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",))
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias"""))
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
# fmt: on
return rename_keys
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Any = dct.pop(lowerCAmelCase_)
lowerCamelCase_ : Any = val
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers):
# read in original q and v biases
lowerCamelCase_ : str = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""")
lowerCamelCase_ : List[str] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""")
# next, set bias in the state dict
lowerCamelCase_ : List[Any] = torch.cat((q_bias, torch.zeros_like(lowerCAmelCase_ , requires_grad=lowerCAmelCase_), v_bias))
lowerCamelCase_ : int = qkv_bias
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Dict = 364 if "coco" in model_name else 224
lowerCamelCase_ : Optional[int] = BlipaVisionConfig(image_size=lowerCAmelCase_).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowerCamelCase_ : Optional[Any] = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=lowerCAmelCase_).to_dict()
elif "opt-6.7b" in model_name:
lowerCamelCase_ : Tuple = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=lowerCAmelCase_).to_dict()
elif "t5-xl" in model_name:
lowerCamelCase_ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1).to_dict()
elif "t5-xxl" in model_name:
lowerCamelCase_ : str = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1).to_dict()
lowerCamelCase_ : Optional[Any] = BlipaConfig(vision_config=lowerCAmelCase_ , text_config=lowerCAmelCase_)
return config, image_size
@torch.no_grad()
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False):
'''simple docstring'''
lowerCamelCase_ : Dict = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b")
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl")
)
lowerCamelCase_ : int = tokenizer("\n" , add_special_tokens=lowerCAmelCase_).input_ids[0]
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_blipa_config(lowerCAmelCase_ , eos_token_id=lowerCAmelCase_)
lowerCamelCase_ : List[Any] = BlipaForConditionalGeneration(lowerCAmelCase_).eval()
lowerCamelCase_ : Any = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = model_name_to_original[model_name]
# load original model
print("Loading original model...")
lowerCamelCase_ : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[Any] = load_model_and_preprocess(
name=lowerCAmelCase_ , model_type=lowerCAmelCase_ , is_eval=lowerCAmelCase_ , device=lowerCAmelCase_)
original_model.eval()
print("Done!")
# update state dict keys
lowerCamelCase_ : Optional[Any] = original_model.state_dict()
lowerCamelCase_ : Optional[int] = create_rename_keys(lowerCAmelCase_)
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCamelCase_ : List[str] = state_dict.pop(lowerCAmelCase_)
if key.startswith("Qformer.bert"):
lowerCamelCase_ : List[Any] = key.replace("Qformer.bert" , "qformer")
if "attention.self" in key:
lowerCamelCase_ : Optional[Any] = key.replace("self" , "attention")
if "opt_proj" in key:
lowerCamelCase_ : str = key.replace("opt_proj" , "language_projection")
if "t5_proj" in key:
lowerCamelCase_ : Union[str, Any] = key.replace("t5_proj" , "language_projection")
if key.startswith("opt"):
lowerCamelCase_ : str = key.replace("opt" , "language")
if key.startswith("t5"):
lowerCamelCase_ : Union[str, Any] = key.replace("t5" , "language")
lowerCamelCase_ : Tuple = val
# read in qv biases
read_in_q_v_bias(lowerCAmelCase_ , lowerCAmelCase_)
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = hf_model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_)
assert len(lowerCAmelCase_) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowerCamelCase_ : int = load_demo_image()
lowerCamelCase_ : Union[str, Any] = vis_processors["eval"](lowerCAmelCase_).unsqueeze(0).to(lowerCAmelCase_)
lowerCamelCase_ : int = tokenizer(["\n"] , return_tensors="pt").input_ids.to(lowerCAmelCase_)
# create processor
lowerCamelCase_ : str = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=lowerCAmelCase_ , image_std=lowerCAmelCase_)
lowerCamelCase_ : int = BlipaProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_)
lowerCamelCase_ : Dict = processor(images=lowerCAmelCase_ , return_tensors="pt").pixel_values.to(lowerCAmelCase_)
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_)
original_model.to(lowerCAmelCase_)
hf_model.to(lowerCAmelCase_)
with torch.no_grad():
if "opt" in model_name:
lowerCamelCase_ : List[str] = original_model({"image": original_pixel_values, "text_input": [""]}).logits
lowerCamelCase_ : str = hf_model(lowerCAmelCase_ , lowerCAmelCase_).logits
else:
lowerCamelCase_ : Union[str, Any] = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits
lowerCamelCase_ : Any = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100)
lowerCamelCase_ : str = hf_model(lowerCAmelCase_ , lowerCAmelCase_ , labels=lowerCAmelCase_).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3])
print("First values of HF logits:" , logits[0, :3, :3])
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCamelCase_ : List[str] = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=lowerCAmelCase_)
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1E-4)
elif model_name == "blip2-flan-t5-xl-coco":
lowerCamelCase_ : int = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=lowerCAmelCase_)
else:
# cast to same type
lowerCamelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(lowerCAmelCase_) , lowerCAmelCase_ , atol=1E-2)
print("Looks ok!")
print("Generating a caption...")
lowerCamelCase_ : str = ""
lowerCamelCase_ : Optional[Any] = tokenizer(lowerCAmelCase_ , return_tensors="pt").input_ids.to(lowerCAmelCase_)
lowerCamelCase_ : Tuple = original_model.generate({"image": original_pixel_values})
lowerCamelCase_ : Union[str, Any] = hf_model.generate(
lowerCAmelCase_ , lowerCAmelCase_ , do_sample=lowerCAmelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , lowerCAmelCase_)
lowerCamelCase_ : Dict = input_ids.shape[1]
lowerCamelCase_ : Dict = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCAmelCase_)
lowerCamelCase_ : Any = [text.strip() for text in output_text]
print("HF generation:" , lowerCAmelCase_)
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCAmelCase_)
hf_model.save_pretrained(lowerCAmelCase_)
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""")
hf_model.push_to_hub(F"""nielsr/{model_name}""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
__magic_name__ = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__magic_name__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
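# Typical invocation of this conversion script (illustrative; the filename is
# an assumption, not given above):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub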
| 708 |
__magic_name__ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase_ : List[Any] = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {', '.join(lowerCAmelCase_)}"""
)
raise ValueError(lowerCAmelCase_)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
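# Worked examples, using value * from_factor / to_factor as defined above
# (illustrative; assumes the de-obfuscated name energy_conversion):
# energy_conversion("joule", "kilojoule", 1_000)   -> 1.0
# energy_conversion("kilowatthour", "joule", 1)    -> 3_600_000.0
# energy_conversion("watthour", "calorie_nutr", 1) -> 3_600 / 4_186.8 ~= 0.8598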
| 73 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__magic_name__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[int] = '''left'''
def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = do_lower_case
lowerCamelCase_ : str = remove_space
lowerCamelCase_ : Tuple = keep_accents
lowerCamelCase_ : Dict = vocab_file
lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def _UpperCamelCase ( self ):
return len(self.sp_model )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowerCamelCase_ : Any = self.__dict__.copy()
lowerCamelCase_ : Optional[int] = None
return state
def __setstate__( self , a_ ):
lowerCamelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : int = {}
lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self , a_ ):
if self.remove_space:
lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
else:
lowerCamelCase_ : str = inputs
lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
lowerCamelCase_ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
lowerCamelCase_ : List[str] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ : int = cur_pieces[1:]
else:
lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def _UpperCamelCase ( self , a_ ):
return self.sp_model.PieceToId(a_ )
def _UpperCamelCase ( self , a_ ):
return self.sp_model.IdToPiece(a_ )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : List[str] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
lowerCamelCase_ : Union[str, Any] = []
sub_texts.append(a_ )
else:
current_sub_text.append(a_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase_ : Union[str, Any] = "".join(a_ )
lowerCamelCase_ : Optional[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
return clean_text
else:
return text
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self , a_ , a_ = None ):
if not os.path.isdir(a_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Any = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
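# Behavioural sketch of preprocess_text above (illustrative): with
# remove_space=True and keep_accents=False,
#   "  Hello ``World''  "  ->  'Hello "World"'
# and accented input is NFKD-decomposed with combining marks dropped, so
# "é" becomes "e"; do_lower_case additionally lowercases the result.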
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
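# Illustrative effect of the _LazyModule pattern above (assumes transformers
# is installed): importing the package is cheap, and a heavy backend is only
# pulled in when a symbol is actually resolved.
# import transformers.models.wav2vec2 as w2v   # no torch/tf import yet
# model_cls = w2v.Wav2Vec2Model                # triggers the torch import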
| 710 |
def __magic_name__ ( lowerCAmelCase_ = 10 , lowerCAmelCase_ = 1000 , lowerCAmelCase_ = True):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
return min_val if option else max_val
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return int((number_a + number_a) / 2)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)")
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value")
def answer(lowerCAmelCase_) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started...")
lowerCamelCase_ : Optional[int] = lower
lowerCamelCase_ : Tuple = higher
lowerCamelCase_ : Union[str, Any] = []
while True:
lowerCamelCase_ : Optional[int] = get_avg(lowerCAmelCase_ , lowerCAmelCase_)
last_numbers.append(lowerCAmelCase_)
if answer(lowerCAmelCase_) == "low":
lowerCamelCase_ : Any = number
elif answer(lowerCAmelCase_) == "high":
lowerCamelCase_ : Optional[int] = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""")
print(F"""details : {last_numbers!s}""")
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = int(input("Enter lower value : ").strip())
lowerCamelCase_ : List[str] = int(input("Enter high value : ").strip())
lowerCamelCase_ : List[str] = int(input("Enter value to guess : ").strip())
guess_the_number(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
if __name__ == "__main__":
main()
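# Example session sketch (illustrative; assumes the de-obfuscated name
# guess_the_number): guess_the_number(0, 100, 37) binary-searches via the
# midpoints 50 -> 25 -> 37, printing "guess the number : 37" and
# "details : [50, 25, 37]".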
| 73 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
__magic_name__ = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
__magic_name__ = {
'''vinai/phobert-base''': 2_5_6,
'''vinai/phobert-large''': 2_5_6,
}
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Tuple = set()
lowerCamelCase_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCamelCase_ : Optional[Any] = char
lowerCamelCase_ : Optional[Any] = set(lowerCAmelCase_)
return pairs
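# Illustrative trace for the BPE merge loop below (assumes the de-obfuscated
# name get_pairs): get_pairs(("l", "o", "w", "</w>")) yields
# {("l", "o"), ("o", "w"), ("w", "</w>")}.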
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , a_ , a_ , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , **a_ , ):
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , **a_ , )
lowerCamelCase_ : Optional[int] = vocab_file
lowerCamelCase_ : Any = merges_file
lowerCamelCase_ : Any = {}
lowerCamelCase_ : Any = 0
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : Tuple = 2
lowerCamelCase_ : List[str] = 3
self.add_from_file(a_ )
lowerCamelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(a_ , encoding="utf-8" ) as merges_handle:
lowerCamelCase_ : Optional[Any] = merges_handle.read().split("\n" )[:-1]
lowerCamelCase_ : Tuple = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCamelCase_ : Tuple = dict(zip(a_ , range(len(a_ ) ) ) )
lowerCamelCase_ : List[str] = {}
def _UpperCamelCase ( self , a_ , a_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[int] = [self.sep_token_id]
lowerCamelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _UpperCamelCase ( self ):
return len(self.encoder )
def _UpperCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCamelCase ( self , a_ ):
if token in self.cache:
return self.cache[token]
lowerCamelCase_ : Tuple = tuple(a_ )
lowerCamelCase_ : int = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
lowerCamelCase_ : List[Any] = get_pairs(a_ )
if not pairs:
return token
while True:
lowerCamelCase_ : Tuple = min(a_ , key=lambda a_ : self.bpe_ranks.get(a_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ : Any = bigram
lowerCamelCase_ : int = []
lowerCamelCase_ : Optional[Any] = 0
while i < len(a_ ):
try:
lowerCamelCase_ : Tuple = word.index(a_ , a_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ : Dict = j
if word[i] == first and i < len(a_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ : Optional[Any] = tuple(a_ )
lowerCamelCase_ : List[Any] = new_word
if len(a_ ) == 1:
break
else:
lowerCamelCase_ : Union[str, Any] = get_pairs(a_ )
lowerCamelCase_ : Tuple = "@@ ".join(a_ )
lowerCamelCase_ : Union[str, Any] = word[:-4]
lowerCamelCase_ : Any = word
return word
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : str = []
lowerCamelCase_ : Dict = re.findall(R"\S+\n?" , a_ )
for token in words:
split_tokens.extend(list(self.bpe(a_ ).split(" " ) ) )
return split_tokens
def _UpperCamelCase ( self , a_ ):
return self.encoder.get(a_ , self.encoder.get(self.unk_token ) )
def _UpperCamelCase ( self , a_ ):
return self.decoder.get(a_ , self.unk_token )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Any = " ".join(a_ ).replace("@@ " , "" ).strip()
return out_string
def _UpperCamelCase ( self , a_ , a_ = None ):
if not os.path.isdir(a_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Optional[int] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ : Optional[Any] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(a_ ):
copyfile(self.merges_file , a_ )
return out_vocab_file, out_merge_file
def _UpperCamelCase ( self , a_ ):
if isinstance(a_ , a_ ):
try:
with open(a_ , "r" , encoding="utf-8" ) as fd:
self.add_from_file(a_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
lowerCamelCase_ : Tuple = f.readlines()
for lineTmp in lines:
lowerCamelCase_ : Dict = lineTmp.strip()
lowerCamelCase_ : Optional[Any] = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
lowerCamelCase_ : Any = line[:idx]
lowerCamelCase_ : Optional[Any] = len(self.encoder )
| 711 |
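For reference, here is a hedged, runnable sketch of what the `get_pairs` helper and one BPE merge step above compute; `"</w>"` marks the end of a word, mirroring the `bpe()` method:

```python
def get_pairs_sketch(word: tuple) -> set:
    """Set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

word = ("l", "o", "w", "</w>")
print(get_pairs_sketch(word))  # {('l', 'o'), ('o', 'w'), ('w', '</w>')} -- order may vary
# bpe() repeatedly merges the pair with the lowest rank in bpe_ranks;
# merging ('l', 'o') here would yield the new word ('lo', 'w', '</w>').
```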
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = '''cvt'''
def __init__( self , a_=3 , a_=[7, 3, 3] , a_=[4, 2, 2] , a_=[2, 1, 1] , a_=[64, 192, 384] , a_=[1, 3, 6] , a_=[1, 2, 10] , a_=[4.0, 4.0, 4.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.1] , a_=[True, True, True] , a_=[False, False, True] , a_=["dw_bn", "dw_bn", "dw_bn"] , a_=[3, 3, 3] , a_=[1, 1, 1] , a_=[2, 2, 2] , a_=[1, 1, 1] , a_=[1, 1, 1] , a_=0.02 , a_=1E-12 , **a_ , ):
super().__init__(**a_ )
lowerCamelCase_ : Optional[Any] = num_channels
lowerCamelCase_ : str = patch_sizes
lowerCamelCase_ : List[Any] = patch_stride
lowerCamelCase_ : str = patch_padding
lowerCamelCase_ : str = embed_dim
lowerCamelCase_ : Union[str, Any] = num_heads
lowerCamelCase_ : Optional[Any] = depth
lowerCamelCase_ : int = mlp_ratio
lowerCamelCase_ : Union[str, Any] = attention_drop_rate
lowerCamelCase_ : Optional[Any] = drop_rate
lowerCamelCase_ : Optional[int] = drop_path_rate
lowerCamelCase_ : Union[str, Any] = qkv_bias
lowerCamelCase_ : int = cls_token
lowerCamelCase_ : int = qkv_projection_method
lowerCamelCase_ : int = kernel_qkv
lowerCamelCase_ : Optional[Any] = padding_kv
lowerCamelCase_ : Optional[int] = stride_kv
lowerCamelCase_ : Optional[int] = padding_q
lowerCamelCase_ : List[Any] = stride_q
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : int = layer_norm_eps
| 73 | 0 |
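CvT's configuration stores one value per stage, so related hyperparameters are lists consumed in lockstep. A small sketch using the defaults from `__init__` above (plain lists here, not the config class itself):

```python
embed_dim, num_heads, depth = [64, 192, 384], [1, 3, 6], [1, 2, 10]
for stage, (dim, heads, layers) in enumerate(zip(embed_dim, num_heads, depth)):
    print(f"stage {stage}: dim={dim}, heads={heads}, layers={layers}")
# stage 0: dim=64, heads=1, layers=1
# stage 1: dim=192, heads=3, layers=2
# stage 2: dim=384, heads=6, layers=10
```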
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_faster()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_fastest()" , setup=lowerCAmelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 712 |
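A quick, illustrative sanity check of the set-based pangram test above (the fastest of the three variants):

```python
pangram = "The quick brown fox jumps over the lazy dog"
assert len({c for c in pangram.lower() if c.isalpha()}) == 26        # all 26 letters present
assert len({c for c in "Hello world".lower() if c.isalpha()}) != 26  # only 7 distinct letters
```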
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
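The `_LazyModule` wiring above defers heavy imports until an attribute is first accessed. A minimal analogy (not the transformers implementation) using PEP 562 module-level `__getattr__`:

```python
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # symbol names and the module that provides them
_name_to_module = {sym: mod for mod, syms in _import_structure.items() for sym in syms}

def __getattr__(name):
    # Called only when `name` is not already defined at module level.
    if name in _name_to_module:
        return getattr(importlib.import_module(_name_to_module[name]), name)
    raise AttributeError(name)
```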
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : str = '''unispeech'''
def __init__( self , a_=32 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.1 , a_=0.1 , a_=0.02 , a_=1E-5 , a_="group" , a_="gelu" , a_=(512, 512, 512, 512, 512, 512, 512) , a_=(5, 2, 2, 2, 2, 2, 2) , a_=(10, 3, 3, 3, 3, 2, 2) , a_=False , a_=128 , a_=16 , a_=False , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_=320 , a_=2 , a_=0.1 , a_=100 , a_=256 , a_=256 , a_=0.1 , a_="mean" , a_=False , a_=False , a_=256 , a_=80 , a_=0 , a_=1 , a_=2 , a_=0.5 , **a_ , ):
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
lowerCamelCase_ : Union[str, Any] = hidden_size
lowerCamelCase_ : Any = feat_extract_norm
lowerCamelCase_ : List[Any] = feat_extract_activation
lowerCamelCase_ : Optional[int] = list(a_ )
lowerCamelCase_ : Optional[int] = list(a_ )
lowerCamelCase_ : List[str] = list(a_ )
lowerCamelCase_ : Union[str, Any] = conv_bias
lowerCamelCase_ : Union[str, Any] = num_conv_pos_embeddings
lowerCamelCase_ : Tuple = num_conv_pos_embedding_groups
lowerCamelCase_ : List[Any] = len(self.conv_dim )
lowerCamelCase_ : str = num_hidden_layers
lowerCamelCase_ : List[Any] = intermediate_size
lowerCamelCase_ : Union[str, Any] = hidden_act
lowerCamelCase_ : int = num_attention_heads
lowerCamelCase_ : str = hidden_dropout
lowerCamelCase_ : Union[str, Any] = attention_dropout
lowerCamelCase_ : List[str] = activation_dropout
lowerCamelCase_ : int = feat_proj_dropout
lowerCamelCase_ : Any = final_dropout
lowerCamelCase_ : Optional[int] = layerdrop
lowerCamelCase_ : Any = layer_norm_eps
lowerCamelCase_ : List[str] = initializer_range
lowerCamelCase_ : Dict = num_ctc_classes
lowerCamelCase_ : Optional[Any] = vocab_size
lowerCamelCase_ : Any = do_stable_layer_norm
lowerCamelCase_ : List[Any] = use_weighted_layer_sum
lowerCamelCase_ : Union[str, Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ : Optional[Any] = apply_spec_augment
lowerCamelCase_ : Dict = mask_time_prob
lowerCamelCase_ : Union[str, Any] = mask_time_length
lowerCamelCase_ : List[str] = mask_time_min_masks
lowerCamelCase_ : List[str] = mask_feature_prob
lowerCamelCase_ : Union[str, Any] = mask_feature_length
lowerCamelCase_ : Any = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase_ : Any = num_codevectors_per_group
lowerCamelCase_ : List[Any] = num_codevector_groups
lowerCamelCase_ : Dict = contrastive_logits_temperature
lowerCamelCase_ : Union[str, Any] = feat_quantizer_dropout
lowerCamelCase_ : Union[str, Any] = num_negatives
lowerCamelCase_ : Optional[int] = codevector_dim
lowerCamelCase_ : Optional[int] = proj_codevector_dim
lowerCamelCase_ : Any = diversity_loss_weight
# ctc loss
lowerCamelCase_ : Optional[Any] = ctc_loss_reduction
lowerCamelCase_ : int = ctc_zero_infinity
# pretraining loss
lowerCamelCase_ : Dict = replace_prob
@property
def _UpperCamelCase ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 713 |
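The final property above multiplies the convolutional strides together; with the default strides this gives the model's input-to-output downsampling factor. A one-line check:

```python
import functools, operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from __init__ above
assert functools.reduce(operator.mul, conv_stride, 1) == 320
# i.e. one feature frame per 320 input samples (20 ms of audio at 16 kHz)
```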
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
__UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , a_ , a_ ):
super().__init__(a_ , a_ )
lowerCamelCase_ : Optional[Any] = self.feature_extractor
lowerCamelCase_ : Optional[int] = False
def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )
def __call__( self , *a_ , **a_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a_ , **a_ )
lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
if len(a_ ) > 0:
lowerCamelCase_ : int = args[0]
lowerCamelCase_ : str = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
if audio is not None:
lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCamelCase_ : Dict = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
lowerCamelCase_ : int = audio_inputs["padding_mask"]
return inputs
def _UpperCamelCase ( self , *a_ , **a_ ):
lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
if len(a_ ) > 0:
lowerCamelCase_ : Optional[int] = args[0]
lowerCamelCase_ : Optional[Any] = args[1:]
if audio_values is not None:
return self._decode_audio(a_ , padding_mask=a_ )
else:
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.decode(*a_ , **a_ )
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Any = to_numpy(a_ )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape
if padding_mask is None:
return list(a_ )
lowerCamelCase_ : Tuple = to_numpy(a_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )
lowerCamelCase_ : str = audio_values.tolist()
for i in range(a_ ):
lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
return audio_values
| 73 | 0 |
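The `_decode_audio` logic above pads the padding mask with the *non*-padding value so that extra generated samples are kept, then slices each batch row with a boolean mask. A hedged standalone sketch of that trick (toy shapes, illustrative names):

```python
import numpy as np

padding_value = 0.0
audio = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq_len)
mask = np.array([[1, 1, 1, 1], [1, 1, 0, 0]])             # 1 = real sample, 0 = padding
difference = audio.shape[-1] - mask.shape[-1]
mask = np.pad(mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)
for row, m in zip(audio, mask):
    print(row[:, m != padding_value])  # second row drops its two padded samples
```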
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCamelCase )}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
__UpperCAmelCase : str = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
def _UpperCamelCase ( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(default=__lowerCamelCase, metadata={'''help''': '''The input training data file (a text file).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__UpperCAmelCase : Optional[int] = field(
default=5, metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Defaults to the max input length of the model.'''
)
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
__UpperCAmelCase : float = field(
default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
}, )
def _UpperCamelCase ( self ):
if self.train_file is not None:
lowerCamelCase_ : str = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowerCamelCase_ : Union[str, Any] = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
with open(lowerCAmelCase_ , "r" , encoding="utf-8") as f:
lowerCamelCase_ : Tuple = [json.loads(lowerCAmelCase_) for line in f.read().splitlines() if (len(lowerCAmelCase_) > 0 and not line.isspace())]
assert len(lowerCAmelCase_) == len(lowerCAmelCase_)
lowerCamelCase_ : Any = {c: dataset[c] for c in dataset.column_names}
lowerCamelCase_ : List[Any] = refs
return Dataset.from_dict(lowerCAmelCase_)
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase_ : List[str] = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
if "validation" not in datasets.keys():
lowerCamelCase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
lowerCamelCase_ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
lowerCamelCase_ : Dict = {}
if data_args.train_file is not None:
lowerCamelCase_ : str = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Any = data_args.validation_file
lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
if extension == "txt":
lowerCamelCase_ : List[str] = "text"
lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : Optional[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""")
config.update_from_string(model_args.config_overrides)
logger.info(F"""New config: {config}""")
lowerCamelCase_ : List[str] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name.")
if model_args.model_name_or_path:
lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch")
lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)
model.resize_token_embeddings(len(lowerCAmelCase_))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
else:
lowerCamelCase_ : Dict = datasets["validation"].column_names
lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_):
# Remove empty lines
lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)
lowerCamelCase_ : str = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
if data_args.validation_ref_file is not None:
lowerCamelCase_ : List[str] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file)
# If we have ref files, we need to keep the trainer from removing them
lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase_ : Union[str, Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
lowerCamelCase_ : int = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase_ : Dict = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
lowerCamelCase_ : Dict = model_args.model_name_or_path
else:
lowerCamelCase_ : int = None
lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
# Evaluation
lowerCamelCase_ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
lowerCamelCase_ : Tuple = trainer.evaluate()
lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
lowerCamelCase_ : Tuple = perplexity
lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
return results
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 714 |
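One detail worth spelling out from the evaluation block above: perplexity is simply `exp(eval_loss)` for a language model, so a cross-entropy loss of ln(10) corresponds to perplexity 10:

```python
import math

eval_loss = math.log(10)           # stand-in for the loss from trainer.evaluate()
print(round(math.exp(eval_loss)))  # 10 -> the model is as uncertain as a 10-way guess
```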
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(lowerCAmelCase_) , lowerCAmelCase_)
return number - int(lowerCAmelCase_)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 73 | 0 |
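Behaviour sketch for `decimal_isolate` above: it returns the fractional part of a number, rounded to `digit_amount` places when that argument is positive (note that `int()` truncates toward zero, which is what keeps the sign of the fraction):

```python
number, digit_amount = 35.345, 2
print(round(number - int(number), digit_amount))  # 0.34
print(round(-14.789 - int(-14.789), 3))           # -0.789
```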
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Any = {}
lowerCamelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , "all_results.json")
if os.path.exists(lowerCAmelCase_):
with open(lowerCAmelCase_ , "r") as f:
lowerCamelCase_ : List[Any] = json.load(lowerCAmelCase_)
else:
raise ValueError(F"""can't find {path}""")
return results
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
import xla_spawn
lowerCamelCase_ : Optional[int] = self.get_auto_remove_tmp_dir()
lowerCamelCase_ : List[Any] = F"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(a_ , "argv" , a_ ):
lowerCamelCase_ : Union[str, Any] = time()
xla_spawn.main()
lowerCamelCase_ : Optional[Any] = time()
lowerCamelCase_ : int = get_results(a_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def _UpperCamelCase ( self ):
import xla_spawn
lowerCamelCase_ : Optional[int] = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(a_ , "argv" , a_ ):
xla_spawn.main()
| 715 |
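The TPU tests above drive a command-line entry point in-process by patching `sys.argv` rather than spawning a subprocess. The pattern in isolation:

```python
import sys
from unittest.mock import patch

def cli_main() -> list:
    return sys.argv[1:]  # stand-in for a script's argument parsing

with patch.object(sys, "argv", ["prog", "--max_steps", "10"]):
    assert cli_main() == ["--max_steps", "10"]
```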
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=400 , a_=True , a_=None , a_=True , ):
lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18}
lowerCamelCase_ : str = parent
lowerCamelCase_ : str = batch_size
lowerCamelCase_ : Tuple = num_channels
lowerCamelCase_ : Optional[int] = image_size
lowerCamelCase_ : List[str] = min_resolution
lowerCamelCase_ : Tuple = max_resolution
lowerCamelCase_ : Tuple = do_resize
lowerCamelCase_ : Dict = size
lowerCamelCase_ : List[str] = apply_ocr
def _UpperCamelCase ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# with apply_OCR = True
lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
# with apply_OCR = False
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = '''conditional_detr'''
__UpperCAmelCase : Dict = ['''past_key_values''']
__UpperCAmelCase : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , a_=True , a_=None , a_=3 , a_=300 , a_=6 , a_=2048 , a_=8 , a_=6 , a_=2048 , a_=8 , a_=0.0 , a_=0.0 , a_=True , a_="relu" , a_=256 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.02 , a_=1.0 , a_=False , a_="sine" , a_="resnet50" , a_=True , a_=False , a_=2 , a_=5 , a_=2 , a_=1 , a_=1 , a_=2 , a_=5 , a_=2 , a_=0.25 , **a_ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCamelCase_ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(a_ , a_ ):
lowerCamelCase_ : Tuple = backbone_config.get("model_type" )
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ : Union[str, Any] = config_class.from_dict(a_ )
lowerCamelCase_ : Any = use_timm_backbone
lowerCamelCase_ : int = backbone_config
lowerCamelCase_ : Optional[int] = num_channels
lowerCamelCase_ : Dict = num_queries
lowerCamelCase_ : List[str] = d_model
lowerCamelCase_ : Dict = encoder_ffn_dim
lowerCamelCase_ : Tuple = encoder_layers
lowerCamelCase_ : Optional[int] = encoder_attention_heads
lowerCamelCase_ : Dict = decoder_ffn_dim
lowerCamelCase_ : Any = decoder_layers
lowerCamelCase_ : Optional[int] = decoder_attention_heads
lowerCamelCase_ : Optional[int] = dropout
lowerCamelCase_ : int = attention_dropout
lowerCamelCase_ : Any = activation_dropout
lowerCamelCase_ : List[Any] = activation_function
lowerCamelCase_ : Dict = init_std
lowerCamelCase_ : Optional[int] = init_xavier_std
lowerCamelCase_ : List[Any] = encoder_layerdrop
lowerCamelCase_ : Dict = decoder_layerdrop
lowerCamelCase_ : List[str] = encoder_layers
lowerCamelCase_ : str = auxiliary_loss
lowerCamelCase_ : List[str] = position_embedding_type
lowerCamelCase_ : List[str] = backbone
lowerCamelCase_ : Optional[int] = use_pretrained_backbone
lowerCamelCase_ : int = dilation
# Hungarian matcher
lowerCamelCase_ : Optional[Any] = class_cost
lowerCamelCase_ : List[str] = bbox_cost
lowerCamelCase_ : Optional[int] = giou_cost
# Loss coefficients
lowerCamelCase_ : Optional[int] = mask_loss_coefficient
lowerCamelCase_ : Optional[int] = dice_loss_coefficient
lowerCamelCase_ : List[str] = cls_loss_coefficient
lowerCamelCase_ : Tuple = bbox_loss_coefficient
lowerCamelCase_ : Any = giou_loss_coefficient
lowerCamelCase_ : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def _UpperCamelCase ( self ):
return self.encoder_attention_heads
@property
def _UpperCamelCase ( self ):
return self.d_model
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase_ : int = self.backbone_config.to_dict()
lowerCamelCase_ : Any = self.__class__.model_type
return output
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = version.parse('''1.11''' )
@property
def _UpperCamelCase ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _UpperCamelCase ( self ):
return 1E-5
@property
def _UpperCamelCase ( self ):
return 12
| 716 |
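The ONNX `inputs` property above maps each model input to its dynamic axes (axis index to symbolic dimension name), which is what an exporter uses to mark variable-sized dimensions. In isolation:

```python
from collections import OrderedDict

onnx_inputs = OrderedDict([
    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
    ("pixel_mask", {0: "batch"}),
])
for name, axes in onnx_inputs.items():
    print(f"{name}: dynamic axes {axes}")
```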
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''luke'''
def __init__( self , a_=5_0267 , a_=50_0000 , a_=768 , a_=256 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=True , a_=None , a_=1 , a_=0 , a_=2 , **a_ , ):
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
lowerCamelCase_ : Tuple = vocab_size
lowerCamelCase_ : Optional[int] = entity_vocab_size
lowerCamelCase_ : Any = hidden_size
lowerCamelCase_ : Dict = entity_emb_size
lowerCamelCase_ : List[Any] = num_hidden_layers
lowerCamelCase_ : int = num_attention_heads
lowerCamelCase_ : Union[str, Any] = hidden_act
lowerCamelCase_ : Tuple = intermediate_size
lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
lowerCamelCase_ : Any = attention_probs_dropout_prob
lowerCamelCase_ : Optional[Any] = max_position_embeddings
lowerCamelCase_ : str = type_vocab_size
lowerCamelCase_ : int = initializer_range
lowerCamelCase_ : List[Any] = layer_norm_eps
lowerCamelCase_ : Optional[int] = use_entity_aware_attention
lowerCamelCase_ : str = classifier_dropout
| 73 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = BertTokenizer
__UpperCAmelCase : Dict = BertTokenizerFast
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Optional[int] = filter_non_english
def _UpperCamelCase ( self ):
super().setUp()
lowerCamelCase_ : Optional[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[Any] = "UNwant\u00E9d,running"
lowerCamelCase_ : Optional[int] = "unwanted, running"
return input_text, output_text
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Union[str, Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [9, 6, 7, 12, 10, 11] )
def _UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[str] = self.get_rust_tokenizer()
lowerCamelCase_ : int = "UNwant\u00E9d,running"
lowerCamelCase_ : str = tokenizer.tokenize(a_ )
lowerCamelCase_ : List[str] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Optional[Any] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : Dict = self.get_rust_tokenizer()
lowerCamelCase_ : Dict = tokenizer.encode(a_ )
lowerCamelCase_ : str = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
# With lower casing
lowerCamelCase_ : int = self.get_tokenizer(do_lower_case=a_ )
lowerCamelCase_ : Tuple = self.get_rust_tokenizer(do_lower_case=a_ )
lowerCamelCase_ : str = "UNwant\u00E9d,running"
lowerCamelCase_ : List[Any] = tokenizer.tokenize(a_ )
lowerCamelCase_ : Any = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Union[str, Any] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : Tuple = tokenizer.encode(a_ )
lowerCamelCase_ : Tuple = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = BasicTokenizer(do_lower_case=a_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = BasicTokenizer()
lowerCamelCase_ : Dict = "a\n'll !!to?'d of, can't."
lowerCamelCase_ : str = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(a_ ) , a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCamelCase_ : Union[str, Any] = {}
for i, token in enumerate(a_ ):
lowerCamelCase_ : List[str] = i
lowerCamelCase_ : Optional[Any] = WordpieceTokenizer(vocab=a_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _UpperCamelCase ( self ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _UpperCamelCase ( self ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
lowerCamelCase_ : str = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(a_ )
lowerCamelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ : str = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : str = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase_ : Dict = tokenizer_r.encode_plus(
a_ , return_attention_mask=a_ , return_token_type_ids=a_ , return_offsets_mapping=a_ , add_special_tokens=a_ , )
lowerCamelCase_ : Any = tokenizer_r.do_lower_case if hasattr(a_ , "do_lower_case" ) else False
lowerCamelCase_ : List[str] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = ["的", "人", "有"]
lowerCamelCase_ : str = "".join(a_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ : Dict = True
lowerCamelCase_ : str = self.tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : Optional[Any] = tokenizer_p.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer_r.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Tuple = tokenizer_r.convert_ids_to_tokens(a_ )
lowerCamelCase_ : int = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase_ : List[Any] = False
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
lowerCamelCase_ : int = tokenizer_r.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Optional[Any] = tokenizer_p.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : str = tokenizer_r.convert_ids_to_tokens(a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ : Optional[Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(a_ )
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
| 717 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : Optional[datasets.Features] = None
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
import pyspark
def generate_fn():
lowerCamelCase_ : Dict = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id"))
for partition_id in partition_order:
lowerCamelCase_ : Dict = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id")
lowerCamelCase_ : Dict = partition_df.collect()
lowerCamelCase_ : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
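# A quick illustration of the factory above (a sketch, not part of the original
# source): for a DataFrame with two partitions of two rows each and
# partition_order = [1, 0], the returned generator yields
#   ("1_0", {...}), ("1_1", {...}), ("0_0", {...}), ("0_1", {...})
# The row counter restarts at 0 for every partition, so the
# "<partition_id>_<row_id>" key is unique across the whole DataFrame.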
class lowerCAmelCase__ ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self , a_ , a_=None , ):
lowerCamelCase_ : Dict = df
lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a_ )
return SparkExamplesIterable(self.df , partition_order=a_ )
def _UpperCamelCase ( self , a_ , a_ ):
lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ )
return SparkExamplesIterable(self.df , partition_order=a_ )
@property
def _UpperCamelCase ( self ):
return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__UpperCAmelCase : Any = SparkConfig
def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
import pyspark
lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase_ : Optional[Any] = df
lowerCamelCase_ : List[Any] = working_dir
super().__init__(
cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
def _UpperCamelCase ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(a_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a_ )
            lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a_ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase_ : List[str] = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _UpperCamelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _UpperCamelCase ( self , a_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _UpperCamelCase ( self , a_ ):
import pyspark
def get_arrow_batch_size(a_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
lowerCamelCase_ : str = self.df.count()
lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase_ : Any = (
self.df.limit(a_ )
.repartition(1 )
            .mapInArrow(get_arrow_batch_size , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
lowerCamelCase_ : int = self.df.repartition(a_ )
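    # Worked example for the sizing logic above (a sketch with made-up numbers):
    # ~10M rows at ~100 bytes each gives an approx_total_size of ~1 GB; with a
    # 500 MB max_shard_size the DataFrame is repartitioned into
    # int(1 GB / 500 MB) = 2 partitions (never more than one partition per row),
    # keeping each initial shard under the size cap.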
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
import pyspark
lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
lowerCamelCase_ : Optional[Any] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase_ : int = self.config.features
lowerCamelCase_ : Any = self._writer_batch_size
lowerCamelCase_ : Tuple = self._fs.storage_options
def write_arrow(a_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCamelCase_ : Optional[int] = next(a_ , a_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Optional[int] = writer_class(
features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(a_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
lowerCamelCase_ : List[str] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(a_ )
if writer._num_bytes > 0:
lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a_ ) ):
lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
shutil.move(a_ , a_ )
lowerCamelCase_ : int = (
            self.df.mapInArrow(write_arrow , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
self._validate_cache_dir()
lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a_ )
lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
lowerCamelCase_ : int = path_join(self._output_dir , a_ )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
            lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a_ )
lowerCamelCase_ : Dict = total_num_examples
lowerCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
lowerCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a_ , a_ , a_ , ):
rename(
a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Dict = 0
for i in range(len(a_ ) ):
lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(a_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
def _UpperCamelCase ( self , a_ , ):
return SparkExamplesIterable(self.df )
| 73 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=400 , a_=True , a_=None , a_=True , ):
lowerCamelCase_ : int = size if size is not None else {"height": 18, "width": 18}
lowerCamelCase_ : str = parent
lowerCamelCase_ : str = batch_size
lowerCamelCase_ : Tuple = num_channels
lowerCamelCase_ : Optional[int] = image_size
lowerCamelCase_ : List[str] = min_resolution
lowerCamelCase_ : Tuple = max_resolution
lowerCamelCase_ : Tuple = do_resize
lowerCamelCase_ : Dict = size
lowerCamelCase_ : List[str] = apply_ocr
def _UpperCamelCase ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# with apply_OCR = True
lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
# with apply_OCR = False
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCamelCase_ : List[str] = cst_fwd.get(lowerCAmelCase_ , np.inf)
lowerCamelCase_ : Dict = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt))
lowerCamelCase_ : Optional[int] = new_cost_f
lowerCamelCase_ : List[str] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCamelCase_ : Tuple = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = -1
lowerCamelCase_ : Tuple = set()
lowerCamelCase_ : Dict = set()
lowerCamelCase_ : int = {source: 0}
lowerCamelCase_ : str = {destination: 0}
lowerCamelCase_ : Tuple = {source: None}
lowerCamelCase_ : Dict = {destination: None}
lowerCamelCase_ : PriorityQueue[Any] = PriorityQueue()
lowerCamelCase_ : PriorityQueue[Any] = PriorityQueue()
lowerCamelCase_ : List[str] = np.inf
queue_forward.put((0, source))
queue_backward.put((0, destination))
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
lowerCamelCase_ ,lowerCamelCase_ : List[Any] = queue_forward.get()
visited_forward.add(lowerCAmelCase_)
lowerCamelCase_ ,lowerCamelCase_ : str = queue_backward.get()
visited_backward.add(lowerCAmelCase_)
lowerCamelCase_ : Any = pass_and_relaxation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
lowerCamelCase_ : Dict = pass_and_relaxation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
lowerCamelCase_ : Union[str, Any] = shortest_distance
return shortest_path_distance
__magic_name__ = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__magic_name__ = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
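# Worked example (a sketch; the names below -- bidirectional_dij, graph_fwd,
# graph_bwd -- assume the originals behind the obfuscated __magic_name__ bindings
# above): the shortest E -> F path in the forward graph is E -> G -> F with cost
# 2 + 1 = 3, beating E -> B -> C -> D -> F with cost 4, so
#   bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3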
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCamelCase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(a_ ):
lowerCamelCase_ : List[Any] = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
lowerCamelCase_ : Any = FlaxAutoModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def _UpperCamelCase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(a_ ):
lowerCamelCase_ : List[Any] = AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
lowerCamelCase_ : List[str] = FlaxAutoModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def _UpperCamelCase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(a_ )
lowerCamelCase_ : str = FlaxBertModel.from_pretrained(a_ )
lowerCamelCase_ : List[str] = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**a_ ):
return model(**a_ )
eval(**a_ ).block_until_ready()
@slow
def _UpperCamelCase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained(a_ )
lowerCamelCase_ : Any = FlaxRobertaModel.from_pretrained(a_ )
lowerCamelCase_ : List[str] = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**a_ ):
return model(**a_ )
eval(**a_ ).block_until_ready()
def _UpperCamelCase ( self ):
with self.assertRaisesRegex(
a_ , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCamelCase_ : Optional[Any] = FlaxAutoModel.from_pretrained("bert-base" )
def _UpperCamelCase ( self ):
with self.assertRaisesRegex(
a_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCamelCase_ : int = FlaxAutoModel.from_pretrained(a_ , revision="aaaaaa" )
def _UpperCamelCase ( self ):
with self.assertRaisesRegex(
a_ , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
lowerCamelCase_ : List[Any] = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def _UpperCamelCase ( self ):
with self.assertRaisesRegex(a_ , "Use `from_pt=True` to load this model" ):
lowerCamelCase_ : List[str] = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''ctrl'''
__UpperCAmelCase : Dict = ['''past_key_values''']
__UpperCAmelCase : int = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a_=24_6534 , a_=256 , a_=1280 , a_=8192 , a_=48 , a_=16 , a_=0.1 , a_=0.1 , a_=1E-6 , a_=0.02 , a_=True , **a_ , ):
lowerCamelCase_ : Dict = vocab_size
lowerCamelCase_ : Any = n_positions
lowerCamelCase_ : Optional[int] = n_embd
lowerCamelCase_ : List[Any] = n_layer
lowerCamelCase_ : Union[str, Any] = n_head
lowerCamelCase_ : str = dff
lowerCamelCase_ : Tuple = resid_pdrop
lowerCamelCase_ : Any = embd_pdrop
lowerCamelCase_ : Dict = layer_norm_epsilon
lowerCamelCase_ : Tuple = initializer_range
lowerCamelCase_ : Any = use_cache
super().__init__(**a_ )
| 73 | 0 |
def __magic_name__ ( lowerCAmelCase_ = 50):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1):
for tile_length in range(2 , 5):
for tile_start in range(row_length - tile_length + 1):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length])
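# Sanity check against Project Euler 116's worked example (assuming the obfuscated
# parameter is the row `length`, as the body implies): a row of length 5 admits 7
# tilings with length-2 tiles, 3 with length-3 tiles and 2 with length-4 tiles, so
# solution(5) should return 7 + 3 + 2 = 12.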
if __name__ == "__main__":
print(f'''{solution() = }''')
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCamelCase )}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
__UpperCAmelCase : str = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
def _UpperCamelCase ( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(default=__lowerCamelCase, metadata={'''help''': '''The input training data file (a text file).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__UpperCAmelCase : Optional[int] = field(
default=5, metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
__UpperCAmelCase : float = field(
default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
}, )
def _UpperCamelCase ( self ):
if self.train_file is not None:
lowerCamelCase_ : str = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowerCamelCase_ : Union[str, Any] = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
with open(lowerCAmelCase_ , "r" , encoding="utf-8") as f:
        lowerCamelCase_ : Tuple = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
assert len(lowerCAmelCase_) == len(lowerCAmelCase_)
lowerCamelCase_ : Any = {c: dataset[c] for c in dataset.column_names}
lowerCamelCase_ : List[Any] = refs
return Dataset.from_dict(lowerCAmelCase_)
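# Note on the ref file consumed above: each line is expected to hold a
# JSON-encoded list of sub-token indices belonging to whole Chinese words, one
# list per training example (the format produced by the companion
# run_chinese_ref.py script; treat this as an assumption if your refs come from
# elsewhere).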
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase_ : List[str] = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
if "validation" not in datasets.keys():
lowerCamelCase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
lowerCamelCase_ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
lowerCamelCase_ : Dict = {}
if data_args.train_file is not None:
lowerCamelCase_ : str = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Any = data_args.validation_file
lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
if extension == "txt":
lowerCamelCase_ : List[str] = "text"
lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : Optional[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""")
config.update_from_string(model_args.config_overrides)
logger.info(F"""New config: {config}""")
lowerCamelCase_ : List[str] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name.")
if model_args.model_name_or_path:
lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch")
lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)
model.resize_token_embeddings(len(lowerCAmelCase_))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
else:
lowerCamelCase_ : Dict = datasets["validation"].column_names
lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_):
# Remove empty lines
        lowerCamelCase_ : str = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)
lowerCamelCase_ : str = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
if data_args.validation_ref_file is not None:
lowerCamelCase_ : List[str] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file)
# If we have ref files, need to avoid it removed by trainer
lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase_ : Union[str, Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
lowerCamelCase_ : int = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase_ : Dict = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
lowerCamelCase_ : Dict = model_args.model_name_or_path
else:
lowerCamelCase_ : int = None
lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
# Evaluation
lowerCamelCase_ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
lowerCamelCase_ : Tuple = trainer.evaluate()
lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
lowerCamelCase_ : Tuple = perplexity
lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
return results
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 73 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''lxmert'''
__UpperCAmelCase : int = {}
def __init__( self , a_=3_0522 , a_=768 , a_=12 , a_=9500 , a_=1600 , a_=400 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=2048 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ):
lowerCamelCase_ : Tuple = vocab_size
lowerCamelCase_ : str = hidden_size
lowerCamelCase_ : Optional[Any] = num_attention_heads
lowerCamelCase_ : int = hidden_act
lowerCamelCase_ : Optional[Any] = intermediate_size
lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
lowerCamelCase_ : List[Any] = attention_probs_dropout_prob
lowerCamelCase_ : List[str] = max_position_embeddings
lowerCamelCase_ : Union[str, Any] = type_vocab_size
lowerCamelCase_ : Dict = initializer_range
lowerCamelCase_ : int = layer_norm_eps
lowerCamelCase_ : List[str] = num_qa_labels
lowerCamelCase_ : List[str] = num_object_labels
lowerCamelCase_ : str = num_attr_labels
lowerCamelCase_ : Dict = l_layers
lowerCamelCase_ : Dict = x_layers
lowerCamelCase_ : Any = r_layers
lowerCamelCase_ : str = visual_feat_dim
lowerCamelCase_ : str = visual_pos_dim
lowerCamelCase_ : List[str] = visual_loss_normalizer
lowerCamelCase_ : Tuple = task_matched
lowerCamelCase_ : Any = task_mask_lm
lowerCamelCase_ : List[str] = task_obj_predict
lowerCamelCase_ : List[str] = task_qa
lowerCamelCase_ : Any = visual_obj_loss
lowerCamelCase_ : Dict = visual_attr_loss
lowerCamelCase_ : Any = visual_feat_loss
lowerCamelCase_ : Any = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**a_ )
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
# setable values
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _UpperCamelCase ( cls ):
return cls()
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : KarrasVeSchedulerState
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
@property
def _UpperCamelCase ( self ):
return True
@register_to_config
def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
pass
def _UpperCamelCase ( self ):
return KarrasVeSchedulerState.create()
def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
lowerCamelCase_ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
            num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.float32 ) , timesteps=a_ , )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
lowerCamelCase_ : List[str] = sigma + gamma * sigma
lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
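    # The method above is the stochastic "churn" step of Karras et al. (2022):
    # sigma_hat = sigma * (1 + gamma) and
    # sample_hat = sample + sqrt(sigma_hat**2 - sigma**2) * eps,
    # with eps ~ N(0, s_noise**2 * I), so noise is only ever added (never removed)
    # while sigma lies inside [s_min, s_max].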
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
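    # step() above performs the first-order (Euler) update
    # sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative, while
    # step_correct() applies the Heun correction by averaging the two slope
    # estimates (0.5 * derivative + 0.5 * derivative_corr), matching Algorithm 2
    # of Karras et al. (2022).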
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
raise NotImplementedError()
| 73 | 0 |
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self , a_ ):
lowerCamelCase_ : List[str] = n
lowerCamelCase_ : Any = [None] * self.n
lowerCamelCase_ : Dict = 0 # index of the first element
lowerCamelCase_ : Union[str, Any] = 0
lowerCamelCase_ : Dict = 0
def __len__( self ):
return self.size
def _UpperCamelCase ( self ):
return self.size == 0
def _UpperCamelCase ( self ):
return False if self.is_empty() else self.array[self.front]
def _UpperCamelCase ( self , a_ ):
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
lowerCamelCase_ : Optional[Any] = data
lowerCamelCase_ : Optional[int] = (self.rear + 1) % self.n
self.size += 1
return self
def _UpperCamelCase ( self ):
if self.size == 0:
raise Exception("UNDERFLOW" )
lowerCamelCase_ : Optional[Any] = self.array[self.front]
lowerCamelCase_ : List[str] = None
lowerCamelCase_ : Optional[int] = (self.front + 1) % self.n
self.size -= 1
return temp
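# A minimal usage sketch (assumption: the obfuscated class and _UpperCamelCase
# methods keep their original names CircularQueue, is_empty, first, enqueue and
# dequeue):
#   q = CircularQueue(3)
#   q.enqueue("a").enqueue("b")   # enqueue returns self, so calls chain
#   q.first()                     # -> "a" (peek; nothing is removed)
#   q.dequeue()                   # -> "a"; len(q) is now 1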
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__UpperCAmelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : List[str] = frozenset([] )
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
        lowerCamelCase_ : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
lowerCamelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCamelCase_ : Dict = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Tuple = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Any = Image.fromarray(np.uint8(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : int = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Optional[int] = Image.fromarray(np.uint8(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Union[str, Any] = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self ):
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : int = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a_ , a_ , a_ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
lowerCamelCase_ : int = pipe(**a_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a_ )
lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
pipe_loaded.to(a_ )
pipe_loaded.set_progress_bar_config(disable=a_ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
self.assertLess(a_ , 1E-4 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
lowerCamelCase_ : int = pipe.generate_mask(**a_ )
lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase_ : List[str] = np.array([0] * 9 )
lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu"
lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
lowerCamelCase_ : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Dict = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
def _UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = "cpu"
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
lowerCamelCase_ : str = pipe.invert(**a_ ).images
lowerCamelCase_ : int = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase_ : Union[str, Any] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _UpperCamelCase ( cls ):
lowerCamelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
lowerCamelCase_ : List[Any] = raw_image
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = torch.manual_seed(0 )
lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : str = "a bowl of fruit"
lowerCamelCase_ : Optional[int] = "a bowl of pears"
lowerCamelCase_ : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
lowerCamelCase_ : List[str] = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Any = "a bowl of fruit"
lowerCamelCase_ : Dict = "a bowl of pears"
lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
lowerCamelCase_ : str = pipe.invert(
prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
lowerCamelCase_ : Any = pipe(
prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase_ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowerCAmelCase__ ( ctypes.Structure ):
"""simple docstring"""
__UpperCAmelCase : str = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def __magic_name__ ( ):
'''simple docstring'''
if os.name == "nt":
lowerCamelCase_ : List[Any] = CursorInfo()
        lowerCamelCase_ : str = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(lowerCAmelCase_ , ctypes.byref(lowerCAmelCase_))
        lowerCamelCase_ : Union[str, Any] = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(lowerCAmelCase_ , ctypes.byref(lowerCAmelCase_))
elif os.name == "posix":
sys.stdout.write("\033[?25l")
sys.stdout.flush()
def __magic_name__ ( ):
'''simple docstring'''
if os.name == "nt":
lowerCamelCase_ : str = CursorInfo()
        lowerCamelCase_ : Union[str, Any] = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(lowerCAmelCase_ , ctypes.byref(lowerCAmelCase_))
        lowerCamelCase_ : Dict = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(lowerCAmelCase_ , ctypes.byref(lowerCAmelCase_))
elif os.name == "posix":
sys.stdout.write("\033[?25h")
sys.stdout.flush()
@contextmanager
def __magic_name__ ( ):
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
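# A minimal POSIX-only sketch of the same pattern, under our own name
# cursor_hidden (the originals above are name-obfuscated). ANSI ?25l hides
# the cursor, ?25h shows it; `finally` guarantees restoration on exceptions.
import sys
from contextlib import contextmanager

@contextmanager
def cursor_hidden():
    sys.stdout.write("\033[?25l")
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()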
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = ["a", "b", "c"]
# Defaults to last layer if both are None
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = BackboneMixin()
lowerCamelCase_ : List[Any] = ["a", "b", "c"]
lowerCamelCase_ : Optional[int] = ["a", "c"]
lowerCamelCase_ : Dict = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowerCamelCase_ : Union[str, Any] = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowerCamelCase_ : str = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
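# One plausible reference implementation consistent with the assertions above
# -- not necessarily transformers' exact code. With both sides None it
# defaults to the last stage; otherwise it derives whichever of
# (features, indices) is missing from the other.
def align_output_features_indices(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)

assert align_output_features_indices(None, None, ["a", "b", "c"]) == (["c"], [2])
assert align_output_features_indices(["a", "c"], None, ["a", "b", "c"]) == (["a", "c"], [0, 2])
assert align_output_features_indices(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])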
| 73 | 0 |
from __future__ import annotations
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self , a_ ):
lowerCamelCase_ : List[str] = data
lowerCamelCase_ : Node | None = None
lowerCamelCase_ : Node | None = None
def __magic_name__ ( lowerCAmelCase_): # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left)
print(tree.data)
display(tree.right)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left) , depth_of_tree(tree.right)) if tree else 0
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
else:
return not tree.left and not tree.right
def __magic_name__ ( ): # Main function for testing.
'''simple docstring'''
lowerCamelCase_ : Any = Node(1)
lowerCamelCase_ : Dict = Node(2)
lowerCamelCase_ : Any = Node(3)
lowerCamelCase_ : str = Node(4)
lowerCamelCase_ : Tuple = Node(5)
lowerCamelCase_ : Optional[Any] = Node(6)
lowerCamelCase_ : Tuple = Node(7)
lowerCamelCase_ : str = Node(8)
lowerCamelCase_ : str = Node(9)
print(is_full_binary_tree(lowerCAmelCase_))
print(depth_of_tree(lowerCAmelCase_))
print("Tree is: ")
display(lowerCAmelCase_)
if __name__ == "__main__":
main()
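# Property check: a full binary tree (every node has 0 or 2 children) always
# satisfies node_count == 2 * leaf_count - 1. A self-contained sketch with
# our own tiny node, since the class above has obfuscated attribute names:
class _N:
    def __init__(self, left=None, right=None):
        self.left, self.right = left, right

def _nodes(t):
    return 1 + _nodes(t.left) + _nodes(t.right) if t else 0

def _leaves(t):
    if t is None:
        return 0
    return 1 if t.left is None and t.right is None else _leaves(t.left) + _leaves(t.right)

full_tree = _N(_N(), _N(_N(), _N()))        # 5 nodes, 3 leaves -> full
assert _nodes(full_tree) == 2 * _leaves(full_tree) - 1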
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
__UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
__UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCAmelCase : int = '''default_config.yaml'''
__UpperCAmelCase : Tuple = config_folder / config_file
__UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
__UpperCAmelCase : int = Path('''tests/test_configs''' )
@classmethod
def _UpperCamelCase ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _UpperCamelCase ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=a_ ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''test-tpu'''
__UpperCAmelCase : Tuple = '''us-central1-a'''
__UpperCAmelCase : Tuple = '''ls'''
__UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
__UpperCAmelCase : Dict = '''cd /usr/share'''
__UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
__UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
| 73 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__magic_name__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
lowerCamelCase_ : Optional[int] = torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = pipe(
image=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
lowerCamelCase_ : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : List[Any] = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , a_ , a_ , a_ ):
super().__init__()
self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ):
lowerCamelCase_ : Optional[Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
lowerCamelCase_ : Optional[int] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCamelCase_ : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : Optional[int] = {}
if accepts_eta:
lowerCamelCase_ : Optional[int] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCamelCase_ : Dict = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
lowerCamelCase_ : Optional[Any] = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : List[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# decode the image latents with the VAE
lowerCamelCase_ : str = self.vqvae.decode(a_ ).sample
lowerCamelCase_ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ : Optional[Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
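# Usage sketch: how a pipeline like the one above is typically driven.
# `LDMPipeline` stands in for the obfuscated class name, and the kwargs mirror
# the __call__ signature (batch_size, generator, eta, num_inference_steps,
# output_type, return_dict); this is illustrative, not a checkpoint recipe.
#
# pipe = LDMPipeline(vqvae=vqvae, unet=unet, scheduler=scheduler)
# images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images
# images[0].save("sample.png")        # output_type defaults to "pil"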
| 73 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_0_2_4,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
__UpperCAmelCase : List[int] = []
def __init__( self , a_ , a_ , a_="<s>" , a_="</s>" , a_="<pad>" , a_="<unk>" , a_=False , a_=False , a_=None , a_=None , a_ = None , **a_ , ):
lowerCamelCase_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , pad_token=a_ , do_upper_case=a_ , do_lower_case=a_ , tgt_lang=a_ , lang_codes=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowerCamelCase_ : Tuple = do_upper_case
lowerCamelCase_ : Tuple = do_lower_case
lowerCamelCase_ : Optional[int] = load_json(a_ )
lowerCamelCase_ : Tuple = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ : str = spm_file
lowerCamelCase_ : int = load_spm(a_ , self.sp_model_kwargs )
if lang_codes is not None:
lowerCamelCase_ : Tuple = lang_codes
lowerCamelCase_ : Union[str, Any] = LANGUAGES[lang_codes]
lowerCamelCase_ : Any = [F"""<lang:{lang}>""" for lang in self.langs]
lowerCamelCase_ : Optional[Any] = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
lowerCamelCase_ : int = self.lang_tokens
lowerCamelCase_ : Union[str, Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCamelCase_ : List[Any] = {}
@property
def _UpperCamelCase ( self ):
return len(self.encoder )
@property
def _UpperCamelCase ( self ):
return self._tgt_lang
@tgt_lang.setter
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Tuple = new_tgt_lang
self.set_tgt_lang_special_tokens(a_ )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Union[str, Any] = self.lang_code_to_id[tgt_lang]
lowerCamelCase_ : Optional[Any] = [lang_code_id]
def _UpperCamelCase ( self , a_ ):
return self.sp_model.encode(a_ , out_type=a_ )
def _UpperCamelCase ( self , a_ ):
return self.encoder.get(a_ , self.encoder[self.unk_token] )
def _UpperCamelCase ( self , a_ ):
return self.decoder.get(a_ , self.unk_token )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCamelCase_ : Dict = self.sp_model.decode(a_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCamelCase_ : List[str] = []
else:
current_sub_tokens.append(a_ )
lowerCamelCase_ : Union[str, Any] = self.sp_model.decode(a_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _UpperCamelCase ( self , a_ , a_=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
lowerCamelCase_ : List[str] = [1] * len(self.prefix_tokens )
lowerCamelCase_ : str = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a_ )) + suffix_ones
return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowerCamelCase_ : Optional[int] = self.__dict__.copy()
lowerCamelCase_ : List[str] = None
return state
def __setstate__( self , a_ ):
lowerCamelCase_ : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : Any = {}
lowerCamelCase_ : List[str] = load_spm(self.spm_file , self.sp_model_kwargs )
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : str = Path(a_ )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
lowerCamelCase_ : Optional[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
lowerCamelCase_ : Union[str, Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(a_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a_ )
elif not os.path.isfile(self.spm_file ):
with open(a_ , "wb" ) as fi:
lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (str(a_ ), str(a_ ))
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = sentencepiece.SentencePieceProcessor(**lowerCAmelCase_)
spm.Load(str(lowerCAmelCase_))
return spm
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
with open(lowerCAmelCase_ , "r") as f:
return json.load(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
with open(lowerCAmelCase_ , "w") as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=2)
| 704 |
import re
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_):
raise ValueError("Invalid Strand")
return dna.translate(dna.maketrans("ATCG" , "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
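# Worked example: the complement maps A<->T and C<->G positionwise, which is
# exactly what the translation table above encodes.
assert "ATCG".translate(str.maketrans("ATCG", "TAGC")) == "TAGC"
assert "GGTCA".translate(str.maketrans("ATCG", "TAGC")) == "CCAGT"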
| 73 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , *a_ , **a_ ):
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , a_ , )
super().__init__(*a_ , **a_ )
| 705 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)]
return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))]
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1):
'''simple docstring'''
    lowerCamelCase_ : NDArray[float64] = cross(lowerCAmelCase_ , lowerCAmelCase_)
lowerCamelCase_ : float = sum(lowerCAmelCase_)
return abs(lowerCAmelCase_) < eps
if __name__ == "__main__":
# Test to check if it works
__magic_name__ = array(
[
polar_force(7_18.4, 1_8_0 - 3_0),
polar_force(8_79.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__magic_name__ = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
__magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
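# Worked check: `in_static_equilibrium` sums the z-components of the 2-D cross
# products (moments). For the beam above, with purely vertical loads applied
# at x = 0, 6, 10, 12, the net moment about the origin is sum(x_i * F_yi):
assert 0 * -2000 + 6 * -1200 + 10 * 15600 + 12 * -12400 == 0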
| 73 | 0 |
import math
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = [True] * n
lowerCamelCase_ : List[str] = False
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : Optional[int] = True
for i in range(3 , int(n**0.5 + 1) , 2):
lowerCamelCase_ : Dict = i * 2
while index < n:
lowerCamelCase_ : Any = False
lowerCamelCase_ : int = index + i
lowerCamelCase_ : List[str] = [2]
for i in range(3 , lowerCAmelCase_ , 2):
if is_prime[i]:
primes.append(lowerCAmelCase_)
return primes
def __magic_name__ ( lowerCAmelCase_ = 9999_6666_3333):
'''simple docstring'''
lowerCamelCase_ : Tuple = math.floor(math.sqrt(lowerCAmelCase_)) + 100
lowerCamelCase_ : List[str] = prime_sieve(lowerCAmelCase_)
lowerCamelCase_ : Any = 0
lowerCamelCase_ : str = 0
lowerCamelCase_ : List[Any] = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ : str = primes[prime_index + 1]
lowerCamelCase_ : Optional[Any] = last_prime**2
lowerCamelCase_ : Union[str, Any] = next_prime**2
# Get numbers divisible by lps(current)
lowerCamelCase_ : Dict = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCamelCase_ : Dict = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ : Dict = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ : List[str] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
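# Sanity sketch: the sieve above strikes odd multiples and returns 2 plus the
# surviving odds; cross-checked here against naive trial division.
def _naive_primes(n):
    return [p for p in range(2, n) if all(p % d for d in range(2, int(p**0.5) + 1))]

assert _naive_primes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# `prime_sieve(30)` above is expected to produce the same list.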
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''ClapFeatureExtractor'''
__UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , a_ , a_ ):
super().__init__(a_ , a_ )
def __call__( self , a_=None , a_=None , a_=None , **a_ ):
lowerCamelCase_ : Any = kwargs.pop("sampling_rate" , a_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
lowerCamelCase_ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if audios is not None:
lowerCamelCase_ : List[str] = self.feature_extractor(
a_ , sampling_rate=a_ , return_tensors=a_ , **a_ )
if text is not None and audios is not None:
lowerCamelCase_ : List[str] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.tokenizer.model_input_names
lowerCamelCase_ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
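# Usage sketch: the processor above fans text out to the tokenizer and raw
# waveforms to the feature extractor, merging both into one encoding. The call
# below is illustrative of the __call__ signature, not a checkpoint recipe;
# `processor` and `waveform` are assumed to exist:
#
# inputs = processor(text=["a dog barking"], audios=[waveform],
#                    sampling_rate=48_000, return_tensors="pt")
# # -> keys: input_ids, attention_mask (text) and input_features (audio)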
| 73 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return getitem, k
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return setitem, k, v
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return delitem, k
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_):
'''simple docstring'''
try:
return fun(lowerCAmelCase_ , *lowerCAmelCase_), None
except Exception as e:
return None, e
__magic_name__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
__magic_name__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
__magic_name__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
__magic_name__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
__magic_name__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__magic_name__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items"),
pytest.param(_overwrite_items , id="overwrite items"),
pytest.param(_delete_items , id="delete items"),
pytest.param(_access_absent_items , id="access absent items"),
pytest.param(_add_with_resize_up , id="add with resize up"),
pytest.param(_add_with_resize_down , id="add with resize down"),
) , )
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : int = HashMap(initial_block_size=4)
lowerCamelCase_ : List[Any] = {}
for _, (fun, *args) in enumerate(lowerCAmelCase_):
lowerCamelCase_ : str = _run_operation(lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_)
lowerCamelCase_ : Union[str, Any] = _run_operation(lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_)
assert my_res == py_res
assert str(lowerCAmelCase_) == str(lowerCAmelCase_)
assert set(lowerCAmelCase_) == set(lowerCAmelCase_)
assert len(lowerCAmelCase_) == len(lowerCAmelCase_)
assert set(my.items()) == set(py.items())
def __magic_name__ ( ):
'''simple docstring'''
def is_public(lowerCAmelCase_) -> bool:
return not name.startswith("_")
lowerCamelCase_ : Any = {name for name in dir({}) if is_public(lowerCAmelCase_)}
lowerCamelCase_ : Optional[int] = {name for name in dir(HashMap()) if is_public(lowerCAmelCase_)}
assert dict_public_names > hash_public_names
| 707 |
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_faster()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_fastest()" , setup=lowerCAmelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
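# Worked example: a pangram uses every letter a-z at least once; the fastest
# variant above simply counts distinct alphabetic characters.
assert len({c for c in "The quick brown fox jumps over the lazy dog".lower() if c.isalpha()}) == 26
assert len({c for c in "Hello world".lower() if c.isalpha()}) != 26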
| 73 | 0 |
'''simple docstring'''
from math import isqrt, log2
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : List[str] = [True] * max_number
for i in range(2 , isqrt(max_number - 1) + 1):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase_ , lowerCAmelCase_):
lowerCamelCase_ : Optional[int] = False
return [i for i in range(2 , lowerCAmelCase_) if is_prime[i]]
def __magic_name__ ( lowerCAmelCase_ = 80_0800 , lowerCAmelCase_ = 80_0800):
'''simple docstring'''
    lowerCamelCase_ : int = degree * log2(lowerCAmelCase_)
lowerCamelCase_ : int = int(lowerCAmelCase_)
lowerCamelCase_ : Any = calculate_prime_numbers(lowerCAmelCase_)
lowerCamelCase_ : Optional[int] = 0
lowerCamelCase_ : Union[str, Any] = 0
lowerCamelCase_ : str = len(lowerCAmelCase_) - 1
while left < right:
while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
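# Derivation check: the two-pointer loop above relies on the monotone log
# transform  p**q * q**p <= n**n  <=>  q*log2(p) + p*log2(q) <= n*log2(n),
# which avoids astronomically large integers. A small concrete check:
from math import log2
p, q, n = 2, 3, 4
assert (p**q * q**p <= n**n) == (q * log2(p) + p * log2(q) <= n * log2(n))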
| 708 |
__magic_name__ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase_ : List[Any] = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {', '.join(lowerCAmelCase_)}"""
)
raise ValueError(lowerCAmelCase_)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
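# Worked example: conversion pivots through joules --
# value * J_per_from_unit / J_per_to_unit. One kilowatthour in kilojoules:
assert 1 * 3_600_000 / 1_000 == 3_600.0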
| 73 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
def is_in_circle(lowerCAmelCase_ , lowerCAmelCase_) -> bool:
lowerCamelCase_ : Optional[int] = sqrt((x**2) + (y**2))
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
lowerCamelCase_ : str = mean(
int(is_in_circle(uniform(-1.0 , 1.0) , uniform(-1.0 , 1.0)))
for _ in range(lowerCAmelCase_))
# The ratio of the area for circle to square is pi/4.
lowerCamelCase_ : List[Any] = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""")
print(F"""The numpy value of pi is {pi}""")
print(F"""The total error is {abs(pi - pi_estimate)}""")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(lowerCAmelCase_ , lowerCAmelCase_)) for _ in range(lowerCAmelCase_)) * (max_value - min_value)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1.0):
'''simple docstring'''
def identity_function(lowerCAmelCase_) -> float:
return x
lowerCamelCase_ : str = area_under_curve_estimator(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowerCamelCase_ : int = (max_value * max_value - min_value * min_value) / 2
print("******************")
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
print(F"""Estimated value is {estimated_value}""")
print(F"""Expected value is {expected_value}""")
print(F"""Total error is {abs(estimated_value - expected_value)}""")
print("******************")
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
def function_to_integrate(lowerCAmelCase_) -> float:
return sqrt(4.0 - x * x)
lowerCamelCase_ : Dict = area_under_curve_estimator(
lowerCAmelCase_ , lowerCAmelCase_ , 0.0 , 2.0)
print("******************")
print("Estimating pi using area_under_curve_estimator")
print(F"""Estimated value is {estimated_value}""")
print(F"""Expected value is {pi}""")
print(F"""Total error is {abs(estimated_value - pi)}""")
print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
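# Sketch of the estimator pattern above on a known integral: the mean of f(U)
# over uniform samples times the interval width; error shrinks ~ 1/sqrt(N).
from random import uniform
from statistics import mean
est = mean(uniform(0.0, 1.0) ** 2 for _ in range(100_000))  # width 1, f(x)=x^2
assert abs(est - 1 / 3) < 0.01                              # true integral is 1/3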
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__magic_name__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[int] = '''left'''
def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = do_lower_case
lowerCamelCase_ : str = remove_space
lowerCamelCase_ : Tuple = keep_accents
lowerCamelCase_ : Dict = vocab_file
lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def _UpperCamelCase ( self ):
return len(self.sp_model )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowerCamelCase_ : Any = self.__dict__.copy()
lowerCamelCase_ : Optional[int] = None
return state
def __setstate__( self , a_ ):
lowerCamelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : int = {}
lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self , a_ ):
if self.remove_space:
lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
else:
lowerCamelCase_ : str = inputs
lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
lowerCamelCase_ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
lowerCamelCase_ : List[str] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ : int = cur_pieces[1:]
else:
lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def _UpperCamelCase ( self , a_ ):
return self.sp_model.PieceToId(a_ )
def _UpperCamelCase ( self , a_ ):
return self.sp_model.IdToPiece(a_ )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : List[str] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
lowerCamelCase_ : Union[str, Any] = []
sub_texts.append(a_ )
else:
current_sub_text.append(a_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase_ : Union[str, Any] = "".join(a_ )
lowerCamelCase_ : Optional[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
return clean_text
else:
return text
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self , a_ , a_ = None ):
if not os.path.isdir(a_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Any = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
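# Note: unlike BERT-style tokenizers, the builders above append the special
# tokens at the END (ids + <sep> + <cls>) and the class pads on the left
# (padding_side = "left"), following XLNet's convention; <cls> gets segment
# id 2 in the token-type ids. Illustrative shapes with hypothetical ids:
ids_a, sep = [8, 9], [17]
assert len(ids_a + sep) * [0] + [2] == [0, 0, 0, 2]   # <cls> segment id is 2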
| 73 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_=False):
'''simple docstring'''
try:
lowerCamelCase_ : Any = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowerCamelCase_ : int = default
else:
# KEY is set, convert it to True or False.
try:
lowerCamelCase_ : str = strtobool(lowerCAmelCase_)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""")
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
lowerCamelCase_ : List[Any] = unittest.skip("test requires faiss")(lowerCAmelCase_)
return test_case
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
lowerCamelCase_ : List[Any] = unittest.skip("test requires regex")(lowerCAmelCase_)
return test_case
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
lowerCamelCase_ : List[str] = unittest.skip("test requires elasticsearch")(lowerCAmelCase_)
return test_case
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
lowerCamelCase_ : List[Any] = unittest.skip("test requires sqlalchemy")(lowerCAmelCase_)
return test_case
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
lowerCamelCase_ : int = unittest.skip("test requires PyTorch")(lowerCAmelCase_)
return test_case
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if not config.TF_AVAILABLE:
lowerCamelCase_ : Optional[int] = unittest.skip("test requires TensorFlow")(lowerCAmelCase_)
return test_case
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if not config.JAX_AVAILABLE:
lowerCamelCase_ : Union[str, Any] = unittest.skip("test requires JAX")(lowerCAmelCase_)
return test_case
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if not config.PIL_AVAILABLE:
lowerCamelCase_ : List[Any] = unittest.skip("test requires Pillow")(lowerCAmelCase_)
return test_case
def __magic_name__ ( test_case):
    '''simple docstring'''
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def __magic_name__ ( test_case):
    '''simple docstring'''
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def __magic_name__ ( test_case):
    '''simple docstring'''
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def __magic_name__ ( model_name):
    '''simple docstring'''
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model_name)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model_name))(test_case)
        else:
            return test_case
    return _require_spacy_model
def __magic_name__ ( test_case):
    '''simple docstring'''
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def __magic_name__ ( test_case):
    '''simple docstring'''
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def __magic_name__ ( test_case):
    '''simple docstring'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def __magic_name__ ( test_case):
    '''simple docstring'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def __magic_name__ ( test_case):
    '''simple docstring'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def __magic_name__ ( test_case):
    '''simple docstring'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def __magic_name__ ( *decorators):
    '''simple docstring'''
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls , name , fn)
        return cls
    return decorate
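# Usage sketch (assuming the factory above is exposed under a name such as
# `apply_decorators_to_all_tests`, which is an illustrative name): every
# `test_*` method of the class gets wrapped by each listed decorator in order.
#
#     @apply_decorators_to_all_tests(unittest.skip("example: skip the whole class"))
#     class MyTests(unittest.TestCase):
#         def test_something(self):
#             ...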
class RequestWouldHangIndefinitelyError(Exception ):
    """simple docstring"""
    pass
class OfflineSimulationMode(enum.Enum ):
    """simple docstring"""
    # Assumes `import enum` at the top of the module (the imports are not shown in this excerpt).
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def __magic_name__ ( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-16):
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs["timeout"] = timeout
        try:
            return online_request(method , invalid_url , **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , F"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def __magic_name__ ( *args , **kwargs):
    '''simple docstring'''
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args , **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def __magic_name__ ( ):
'''simple docstring'''
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __magic_name__ ( ):
'''simple docstring'''
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
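# Usage sketch for the two Arrow-memory context managers above (hypothetical
# local names): the first asserts that the wrapped block allocates Arrow
# memory, the second that it does not.
#
#     with assert_arrow_memory_increases():
#         table = pa.table({"col": list(range(100_000))})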
def __magic_name__ ( rng_1 , rng_2):
    '''simple docstring'''
    return deepcopy(rng_1).integers(0 , 100 , 10).tolist() == deepcopy(rng_2).integers(0 , 100 , 10).tolist()
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs):
        try:
            return func(*args , **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper , lowerCAmelCase_)
class _RunOutput:
    """simple docstring"""
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False):
    '''simple docstring'''
    if echo:
        print("\nRunning: " , " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, we will need to switch to the following code. The problem is that no data
    # will be seen until it's done and, if it hangs, there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label="stdout:")),
            _read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label="stderr:")),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def __magic_name__ ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""")
    return result
def pytest_xdist_worker_id( ):
    '''simple docstring'''
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0")
    worker = re.sub(R"^gw" , "" , worker , 0 , re.M)
    return int(worker)
def __magic_name__ ( ):
    '''simple docstring'''
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
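# Example: pytest-xdist worker "gw3" yields 2_9500 + 3 == 2_9503, so parallel
# test workers bind distinct torch.distributed ports and don't collide.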
| 710 |
def __magic_name__ ( min_val = 10 , max_val = 1000 , option = True):
    '''simple docstring'''
    assert (
        isinstance(min_val , int)
        and isinstance(max_val , int)
        and isinstance(option , bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("min_val must be less than or equal to max_val")
    return min_val if option else max_val
def get_avg( number_1 , number_2):
    '''simple docstring'''
    return int((number_1 + number_2) / 2)
def guess_the_number( lower , higher , to_guess):
    '''simple docstring'''
    assert (
        isinstance(lower , int) and isinstance(higher , int) and isinstance(to_guess , int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value of lower must be less than higher")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")
    def answer(number) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""")
    print(F"""details : {last_numbers!s}""")
def main( ):
    '''simple docstring'''
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower , higher , guess)
if __name__ == "__main__":
main()
| 73 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset ):
    """simple docstring"""
    def __init__( self , dataset , process , params ):
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
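# Usage sketch (the wrapped list, lambda, and params below are illustrative):
# `process` is applied lazily, one item at a time, when the dataset is indexed.
#
#     ds = PipelineDataset(["hello", "world"], process=lambda x, **kw: x.upper(), params={})
#     assert ds[0] == "HELLO"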
class PipelineIterator(IterableDataset ):
    """simple docstring"""
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
    def __len__( self ):
        return len(self.loader )
    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self
    def loader_batch_item( self ):
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result
    def __next__( self ):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
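# Usage sketch (`dataset` and `model_forward` are illustrative placeholders):
# the iterator above unrolls batched model outputs back into per-item results,
# so callers see one output per input regardless of the DataLoader batch size.
#
#     loader = torch.utils.data.DataLoader(dataset, batch_size=4)
#     for output in PipelineIterator(loader, infer=model_forward, params={}, loader_batch_size=4):
#         ...  # one output per dataset item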
class PipelineChunkIterator(PipelineIterator ):
    """simple docstring"""
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )
    def __iter__( self ):
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self
    def __next__( self ):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class PipelinePackIterator(PipelineIterator ):
    """simple docstring"""
    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self
    def __next__( self ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator`; we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last" )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last" )
                accumulator.append(item )
        return accumulator
class KeyDataset(Dataset ):
    """simple docstring"""
    def __init__( self , dataset , key ):
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset ):
    """simple docstring"""
    def __init__( self , dataset , key_1 , key_2 ):
        self.dataset = dataset
        self.key_1 = key_1
        self.key_2 = key_2
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.key_1], "text_pair": self.dataset[i][self.key_2]}
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''cvt'''
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
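# Usage sketch (assuming the class above is exposed as `CvtConfig`): the
# defaults describe the CvT-13 layout -- three stages with embedding dims
# 64/192/384 and depths 1/2/10.
#
#     config = CvtConfig()
#     assert config.depth == [1, 2, 10]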
| 73 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids, scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        input_ids, scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__magic_name__ = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__magic_name__ = {
'''RUCAIBox/mvp''': 1_0_2_4,
}
class lowerCAmelCase__ ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = MvpTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = '''EncodecFeatureExtractor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            inputs = self.tokenizer(text , **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        audio_values = kwargs.pop("audio" , None )
        padding_mask = kwargs.pop("padding_mask" , None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    def _decode_audio( self , audio_values , padding_mask = None ):
        audio_values = to_numpy(audio_values )
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values )
        padding_mask = to_numpy(padding_mask )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , "constant" , constant_values=padding_value )
        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1 )
        return audio_values
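# Minimal, self-contained illustration of the padding-mask trimming above
# (hypothetical values; a padding_value of 0 is assumed): steps whose mask
# equals the padding value are dropped from each decoded waveform.
#
#     mask = np.array([1, 1, 1, 0, 0])
#     audio = np.arange(10).reshape(2, 5)   # (channels=2, seq_len=5)
#     trimmed = audio[:, mask != 0]         # shape (2, 3): padded steps removed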
| 73 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """simple docstring"""
    def __init__( self , id_ ):
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__( self , other ):
        return self.key < other.key
    def __repr__( self ):
        return self.id
    def add_neighbor( self , vertex ):
        self.neighbors.append(vertex )
    def add_edge( self , vertex , weight ):
        self.edges[vertex.id] = weight
def __magic_name__ ( graph , a , b , edge):
    '''simple docstring'''
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge)
    graph[b - 1].add_edge(graph[a - 1] , edge)
def __magic_name__ ( graph , root):
    '''simple docstring'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def __magic_name__ ( graph , root):
    '''simple docstring'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1 , len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
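# Usage sketch (illustrative; assumes the two functions above are bound to
# names like `connect` and `prim`): build an undirected weighted graph, then
# run Prim from any root to get the MST as (child, parent) index pairs.
#
#     graph = [Vertex(i) for i in range(4)]
#     connect(graph, 1, 2, 3)   # edge between vertices 1 and 2, weight 3
#     connect(graph, 2, 3, 1)
#     connect(graph, 3, 4, 2)
#     mst_edges = prim(graph, graph[0])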
def __magic_name__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
def decimal_isolate( number , digit_amount):
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(number) , digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 73 | 0 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( n):
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
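# Example: trial division emits the prime factors in non-decreasing order with
# multiplicity, e.g. 360 -> [2, 2, 2, 3, 3, 5].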
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "apply_ocr" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_LayoutLMv3_integration_test( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        image = Image.open(ds[0]["file"] ).convert("RGB" )
        encoding = image_processing(image , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin, unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer( self ):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        txt = "Hello World!"
        ids = [65, 1_8536, 2260, 101, 66]
        self.assertListEqual(ids , self.big_tokenizer.encode(txt ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        txt = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        ids = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(ids , self.big_tokenizer.encode(txt ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="original_full" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens( self ):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def _UpperCamelCase ( self ):
# fmt: off
lowerCamelCase_ : Tuple = {"input_ids": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''luke'''
def __init__( self , a_=5_0267 , a_=50_0000 , a_=768 , a_=256 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=True , a_=None , a_=1 , a_=0 , a_=2 , **a_ , ):
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
lowerCamelCase_ : Tuple = vocab_size
lowerCamelCase_ : Optional[int] = entity_vocab_size
lowerCamelCase_ : Any = hidden_size
lowerCamelCase_ : Dict = entity_emb_size
lowerCamelCase_ : List[Any] = num_hidden_layers
lowerCamelCase_ : int = num_attention_heads
lowerCamelCase_ : Union[str, Any] = hidden_act
lowerCamelCase_ : Tuple = intermediate_size
lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
lowerCamelCase_ : Any = attention_probs_dropout_prob
lowerCamelCase_ : Optional[Any] = max_position_embeddings
lowerCamelCase_ : str = type_vocab_size
lowerCamelCase_ : int = initializer_range
lowerCamelCase_ : List[Any] = layer_norm_eps
lowerCamelCase_ : Optional[int] = use_entity_aware_attention
lowerCamelCase_ : str = classifier_dropout
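# Added illustration (hedged): the config class above follows the usual
# PretrainedConfig contract; constructor kwargs become attributes that round-trip
# through a dict. A minimal, dependency-free sketch (names invented for this sketch):
class _MiniConfig:
    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768):
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size

    def to_dict(self):
        return dict(self.__dict__)

# _MiniConfig(hidden_size=1024).to_dict()["hidden_size"] -> 1024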
| 73 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__magic_name__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = pipe.dual_guided(
prompt="first prompt" , image=a_ , text_to_image_strength=0.75 , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
lowerCamelCase_ : List[str] = VersatileDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : List[str] = generator.manual_seed(0 )
lowerCamelCase_ : Optional[int] = pipe.dual_guided(
prompt="first prompt" , image=a_ , text_to_image_strength=0.75 , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : int = "cyberpunk 2077"
lowerCamelCase_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Tuple = pipe.dual_guided(
prompt=a_ , image=a_ , text_to_image_strength=0.75 , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
lowerCamelCase_ : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Any = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCamelCase_ : Union[str, Any] = "A painting of a squirrel eating a burger "
lowerCamelCase_ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = pipe.text_to_image(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowerCamelCase_ : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : str = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCamelCase_ : Union[str, Any] = pipe.image_variation(a_ , generator=a_ , output_type="numpy" ).images
lowerCamelCase_ : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Optional[Any] = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
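# Added illustration (hedged): the reproducibility pattern these tests rely on;
# identical seeds must give identical outputs, which is what makes the save/reload
# comparison above meaningful. A plain-NumPy stand-in:
def _seeded_sample(seed):
    return np.random.default_rng(seed).standard_normal(4)

assert np.abs(_seeded_sample(0) - _seeded_sample(0)).sum() < 1e-5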
| 717 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : Optional[datasets.Features] = None
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
import pyspark
def generate_fn():
lowerCamelCase_ : Dict = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id"))
for partition_id in partition_order:
lowerCamelCase_ : Dict = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id")
lowerCamelCase_ : Dict = partition_df.collect()
lowerCamelCase_ : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
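# Added illustration (hedged): the same factory pattern with plain lists standing in
# for Spark partitions; a closure captures the partition order and yields
# "{partition_id}_{row_id}" keys, mirroring the generator built above.
def _toy_generate_fn(partitions, partition_order):
    def generate_fn():
        for pid in partition_order:
            for row_id, row in enumerate(partitions[pid]):
                yield f"{pid}_{row_id}", row
    return generate_fn

# list(_toy_generate_fn([["a"], ["b", "c"]], [1, 0])()) ->
# [("1_0", "b"), ("1_1", "c"), ("0_0", "a")]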
class lowerCAmelCase__ ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self , a_ , a_=None , ):
lowerCamelCase_ : Dict = df
lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a_ )
return SparkExamplesIterable(self.df , partition_order=a_ )
def _UpperCamelCase ( self , a_ , a_ ):
lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ )
return SparkExamplesIterable(self.df , partition_order=a_ )
@property
def _UpperCamelCase ( self ):
return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__UpperCAmelCase : Any = SparkConfig
def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
import pyspark
lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase_ : Optional[Any] = df
lowerCamelCase_ : List[Any] = working_dir
super().__init__(
cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
def _UpperCamelCase ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(a_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a_ )
lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a_ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase_ : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _UpperCamelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _UpperCamelCase ( self , a_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _UpperCamelCase ( self , a_ ):
import pyspark
def get_arrow_batch_size(a_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
lowerCamelCase_ : str = self.df.count()
lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase_ : Any = (
self.df.limit(a_ )
.repartition(1 )
.mapInArrow(a_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
lowerCamelCase_ : int = self.df.repartition(a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
import pyspark
lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
lowerCamelCase_ : Optional[Any] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase_ : int = self.config.features
lowerCamelCase_ : Any = self._writer_batch_size
lowerCamelCase_ : Tuple = self._fs.storage_options
def write_arrow(a_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCamelCase_ : Optional[int] = next(a_ , a_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Optional[int] = writer_class(
features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(a_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
lowerCamelCase_ : List[str] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(a_ )
if writer._num_bytes > 0:
lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a_ ) ):
lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
shutil.move(a_ , a_ )
lowerCamelCase_ : int = (
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
self._validate_cache_dir()
lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a_ )
lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
lowerCamelCase_ : int = path_join(self._output_dir , a_ )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
(num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a_ )
lowerCamelCase_ : Dict = total_num_examples
lowerCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
lowerCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a_ , a_ , a_ , ):
rename(
a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Dict = 0
for i in range(len(a_ ) ):
lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(a_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
def _UpperCamelCase ( self , a_ , ):
return SparkExamplesIterable(self.df )
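# Added illustration (hedged): the renaming bookkeeping in the split-preparation
# method above, isolated; per-task shard counts are flattened into consecutive
# global shard ids before the distributed rename fills the SSSSS-of-NNNNN pattern.
def _global_shard_ids(task_id_and_num_shards):
    out, global_shard_id = [], 0
    for task_id, num_shards in task_id_and_num_shards:
        for shard_id in range(num_shards):
            out.append((task_id, shard_id, global_shard_id))
            global_shard_id += 1
    return out

# _global_shard_ids([(7, 2), (9, 1)]) -> [(7, 0, 0), (7, 1, 1), (9, 0, 2)]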
| 73 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
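# Added illustration (hedged): the lazy-import pattern above in miniature; attribute
# access triggers the real import, so importing the package stays cheap until a
# symbol is actually used. Dependency-free sketch:
import importlib
import types

class _LazyMod(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

# _LazyMod("demo", {"json": ["dumps"]}).dumps({"a": 1}) -> '{"a": 1}'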
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax the edges out of ``v`` on one side and update the best crossing distance."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bi-directional Dijkstra: search from both endpoints until the frontiers meet."""
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
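# Added usage sketch (hedged; relies on the reconstructed names above): cross-check
# the bidirectional search against a plain one-directional Dijkstra on the forward graph.
import heapq

def _plain_dijkstra(graph, source, destination):
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, v = heapq.heappop(heap)
        if v == destination:
            return d
        if d > dist.get(v, float("inf")):
            continue
        for nxt, w in graph.get(v, []):
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                heapq.heappush(heap, (d + w, nxt))
    return -1

# E -> G -> F costs 3, beating E -> B -> C -> D -> F at cost 4:
assert _plain_dijkstra(graph_fwd, "E", "F") == bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3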
| 73 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCamelCase_ : str = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCamelCase_ : Optional[int] = tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCamelCase_ : str = tf_top_k_top_p_filtering(a_ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCamelCase_ : Union[str, Any] = output[output != -float("inf" )]
lowerCamelCase_ : List[Any] = tf.cast(
tf.where(tf.not_equal(a_ , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(a_ , a_ , rtol=1E-12 )
tf.debugging.assert_equal(a_ , a_ )
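# Added illustration (hedged): the filtering the test above pins down, redone in
# plain NumPy (min_tokens_to_keep omitted for brevity). Top-k drops everything below
# the k-th largest logit; top-p then drops the tail of the sorted cumulative mass.
def _np_top_k_top_p(logits, top_k=0, top_p=1.0, filter_value=-np.inf):
    logits = logits.astype(float).copy()
    if top_k > 0:
        kth_val = np.sort(logits)[-top_k]
        logits[logits < kth_val] = filter_value
    if top_p < 1.0:
        order = np.argsort(logits)[::-1]
        probs = np.exp(logits[order] - logits[order][0])
        probs = probs / probs.sum()
        drop = np.cumsum(probs) > top_p
        drop[1:] = drop[:-1].copy()  # keep the token that first crosses top_p
        drop[0] = False
        logits[order[drop]] = filter_value
    return logits

# _np_top_k_top_p(np.array([1.0, 3.0, 2.0, 0.0]), top_k=2, top_p=0.9)
# -> [-inf, 3.0, 2.0, -inf]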
@require_tf
class lowerCAmelCase__ ( unittest.TestCase, __lowerCamelCase ):
"""simple docstring"""
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__UpperCAmelCase : Tuple = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def _UpperCamelCase ( self ):
# TF-only test: tf.saved_model export
lowerCamelCase_ : Tuple = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowerCamelCase_ : Dict = 2
lowerCamelCase_ : int = 2
class lowerCAmelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self , a_ ):
super(a_ , self ).__init__()
lowerCamelCase_ : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=a_ , )
def _UpperCamelCase ( self , a_ , a_ ):
lowerCamelCase_ : int = self.model.generate(
input_ids=a_ , attention_mask=a_ , max_new_tokens=a_ , return_dict_in_generate=a_ , )
return {"sequences": outputs["sequences"]}
lowerCamelCase_ : int = [[2, 0], [102, 103]]
lowerCamelCase_ : Optional[Any] = [[1, 0], [1, 1]]
lowerCamelCase_ : Dict = DummyModel(model=a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a_ , a_ , signatures={"serving_default": dummy_model.serving} )
lowerCamelCase_ : List[str] = tf.saved_model.load(a_ ).signatures["serving_default"]
for batch_size in range(1 , len(a_ ) + 1 ):
lowerCamelCase_ : Tuple = {
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCamelCase_ : Dict = serving_func(**a_ )["sequences"]
lowerCamelCase_ : str = test_model.generate(**a_ , max_new_tokens=a_ )
tf.debugging.assert_equal(a_ , a_ )
@slow
def _UpperCamelCase ( self ):
# TF-only test: tf.saved_model export
lowerCamelCase_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowerCamelCase_ : int = 1
lowerCamelCase_ : Tuple = 2
class lowerCAmelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self , a_ ):
super(a_ , self ).__init__()
lowerCamelCase_ : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=a_ , )
def _UpperCamelCase ( self , a_ , a_ ):
lowerCamelCase_ : Dict = self.model.generate(
input_ids=a_ , attention_mask=a_ , max_new_tokens=a_ , return_dict_in_generate=a_ , )
return {"sequences": outputs["sequences"]}
lowerCamelCase_ : Optional[int] = [[2], [102, 103]]
lowerCamelCase_ : Union[str, Any] = [[1], [1, 1]]
lowerCamelCase_ : List[Any] = DummyModel(model=a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a_ , a_ , signatures={"serving_default": dummy_model.serving} )
lowerCamelCase_ : Optional[Any] = tf.saved_model.load(a_ ).signatures["serving_default"]
for input_row in range(len(a_ ) ):
lowerCamelCase_ : List[str] = {
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCamelCase_ : Optional[Any] = serving_func(**a_ )["sequences"]
lowerCamelCase_ : Optional[Any] = test_model.generate(**a_ , max_new_tokens=a_ )
tf.debugging.assert_equal(a_ , a_ )
@slow
@require_tensorflow_text
def _UpperCamelCase ( self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=a_ )
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self ):
super().__init__()
lowerCamelCase_ : int = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(a_ , "spiece.model" ) , "rb" ).read() )
lowerCamelCase_ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def _UpperCamelCase ( self , a_ , *a_ , **a_ ):
lowerCamelCase_ : int = self.tokenizer.tokenize(a_ )
lowerCamelCase_ : List[Any] = text.pad_model_inputs(
a_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowerCamelCase_ : Tuple = self.model.generate(input_ids=a_ , attention_mask=a_ )
return self.tokenizer.detokenize(a_ )
lowerCamelCase_ : Dict = CompleteSentenceTransformer()
lowerCamelCase_ : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
lowerCamelCase_ : str = complete_model(a_ )
lowerCamelCase_ : Any = tf.keras.Model(a_ , a_ )
keras_model.save(a_ )
def _UpperCamelCase ( self ):
# Has PT equivalent: this test relies on random sampling
lowerCamelCase_ : Optional[Any] = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
lowerCamelCase_ : Tuple = 14
lowerCamelCase_ : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowerCamelCase_ : Dict = "Hello, my dog is cute and"
lowerCamelCase_ : Dict = tokenizer(a_ , return_tensors="tf" )
lowerCamelCase_ : Tuple = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowerCamelCase_ : int = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
lowerCamelCase_ : List[Any] = model.generate(**a_ , eos_token_id=a_ , **a_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCamelCase_ : Any = [638, 198]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**a_ , eos_token_id=a_ , **a_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _UpperCamelCase ( self ):
# Has PT equivalent: ample use of framework-specific code
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
lowerCamelCase_ : Tuple = "Hugging Face is a technology company based in New York and Paris."
lowerCamelCase_ : Optional[Any] = bart_tokenizer(a_ , return_tensors="tf" ).input_ids
lowerCamelCase_ : str = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
lowerCamelCase_ : Any = bart_model.generate(a_ ).numpy()
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def _UpperCamelCase ( self , a_ , a_=None , **a_ ):
return super().call(a_ , **a_ )
lowerCamelCase_ : Union[str, Any] = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
lowerCamelCase_ : int = bart_model.generate(a_ , foo="bar" ).numpy()
self.assertTrue(np.array_equal(a_ , a_ ) )
class lowerCAmelCase__ ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def _UpperCamelCase ( self , a_ , **a_ ):
return super().call(a_ , **a_ )
lowerCamelCase_ : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
lowerCamelCase_ : int = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCamelCase_ : Optional[Any] = bart_model.generate(a_ ).numpy()
with self.assertRaises(a_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(a_ , foo="bar" )
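# Added illustration (hedged): the behavior the last test exercises; generate-style
# helpers forward only the kwargs a callee's signature accepts, and a bare **kwargs
# defeats that filtering. Minimal stand-in:
import inspect

def _filter_model_kwargs(fn, **kwargs):
    params = inspect.signature(fn).parameters
    if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return kwargs  # **kwargs swallows everything: nothing can be filtered out
    return {k: v for k, v in kwargs.items() if k in params}

# def call(x, foo=None): ...
# _filter_model_kwargs(call, foo="bar", baz=1) -> {"foo": "bar"}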
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''ctrl'''
__UpperCAmelCase : Dict = ['''past_key_values''']
__UpperCAmelCase : int = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a_=24_6534 , a_=256 , a_=1280 , a_=8192 , a_=48 , a_=16 , a_=0.1 , a_=0.1 , a_=1E-6 , a_=0.02 , a_=True , **a_ , ):
lowerCamelCase_ : Dict = vocab_size
lowerCamelCase_ : Any = n_positions
lowerCamelCase_ : Optional[int] = n_embd
lowerCamelCase_ : List[Any] = n_layer
lowerCamelCase_ : Union[str, Any] = n_head
lowerCamelCase_ : str = dff
lowerCamelCase_ : Tuple = resid_pdrop
lowerCamelCase_ : Any = embd_pdrop
lowerCamelCase_ : Dict = layer_norm_epsilon
lowerCamelCase_ : Tuple = initializer_range
lowerCamelCase_ : Any = use_cache
super().__init__(**a_ )
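# Added illustration (hedged): the attribute_map above lets canonical names
# (hidden_size, num_hidden_layers, ...) alias model-specific ones (n_embd, n_layer, ...).
# A minimal, dependency-free version of that aliasing:
class _AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=1280, n_layer=48):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        alias = type(self).attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)

# _AliasedConfig().hidden_size -> 1280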
| 73 | 0 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , *a_ , **a_ ):
warnings.warn(
"The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ChineseCLIPImageProcessor instead." , a_ , )
super().__init__(*a_ , **a_ )
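# Added illustration (hedged): the shim pattern above in miniature; a deprecated
# class warns once on construction and otherwise behaves exactly like its
# replacement (class names here are invented for the sketch):
class _NewProcessor:
    def __init__(self, size=224):
        self.size = size

class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldFeatureExtractor is deprecated; use _NewProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)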
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCamelCase )}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
__UpperCAmelCase : str = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
def _UpperCamelCase ( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase : Optional[str] = field(default=__lowerCamelCase, metadata={'''help''': '''The input training data file (a text file).'''} )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : Optional[str] = field(
default=__lowerCamelCase, metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''}, )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__UpperCAmelCase : Optional[int] = field(
default=5, metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
}, )
__UpperCAmelCase : Optional[int] = field(
default=__lowerCamelCase, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
__UpperCAmelCase : float = field(
default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__UpperCAmelCase : bool = field(
default=__lowerCamelCase, metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
}, )
def _UpperCamelCase ( self ):
if self.train_file is not None:
lowerCamelCase_ : str = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowerCamelCase_ : Union[str, Any] = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
with open(lowerCAmelCase_ , "r" , encoding="utf-8") as f:
lowerCamelCase_ : Tuple = [json.loads(lowerCAmelCase_) for line in f.read().splitlines() if (len(lowerCAmelCase_) > 0 and not line.isspace())]
assert len(lowerCAmelCase_) == len(lowerCAmelCase_)
lowerCamelCase_ : Any = {c: dataset[c] for c in dataset.column_names}
lowerCamelCase_ : List[Any] = refs
return Dataset.from_dict(lowerCAmelCase_)
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase_ : List[str] = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
if "validation" not in datasets.keys():
lowerCamelCase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
lowerCamelCase_ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
lowerCamelCase_ : Dict = {}
if data_args.train_file is not None:
lowerCamelCase_ : str = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Any = data_args.validation_file
lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
if extension == "txt":
lowerCamelCase_ : List[str] = "text"
lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : Optional[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""")
config.update_from_string(model_args.config_overrides)
logger.info(F"""New config: {config}""")
lowerCamelCase_ : List[str] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name.")
if model_args.model_name_or_path:
lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch")
lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)
model.resize_token_embeddings(len(lowerCAmelCase_))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
else:
lowerCamelCase_ : Dict = datasets["validation"].column_names
lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_):
# Remove empty lines
lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)
lowerCamelCase_ : str = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
if data_args.validation_ref_file is not None:
lowerCamelCase_ : List[str] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file)
# If we have ref files, need to avoid it removed by trainer
lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase_ : Union[str, Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
lowerCamelCase_ : int = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase_ : Dict = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
lowerCamelCase_ : Dict = model_args.model_name_or_path
else:
lowerCamelCase_ : int = None
lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
# Evaluation
lowerCamelCase_ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
lowerCamelCase_ : Tuple = trainer.evaluate()
lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
lowerCamelCase_ : Tuple = perplexity
lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
return results
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
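# Added illustration (hedged): the grouping that whole-word masking depends on;
# WordPiece continuation pieces ("##...") are glued to the preceding piece so a word
# is masked or kept as one unit. Dependency-free sketch:
import random

def _whole_word_spans(pieces):
    spans = []
    for i, piece in enumerate(pieces):
        if piece.startswith("##") and spans:
            spans[-1].append(i)
        else:
            spans.append([i])
    return spans

def _mask_whole_words(pieces, mlm_probability=0.15, seed=0):
    rng = random.Random(seed)
    out = list(pieces)
    for span in _whole_word_spans(pieces):
        if rng.random() < mlm_probability:
            for i in span:
                out[i] = "[MASK]"
    return out

# _mask_whole_words(["un", "##divided", "attention"], mlm_probability=1.0)
# -> ["[MASK]", "[MASK]", "[MASK]"]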
| 73 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __magic_name__ ( *lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_=True , lowerCAmelCase_=2):
'''simple docstring'''
from .. import __version__
lowerCamelCase_ : Dict = take_from
lowerCamelCase_ : Tuple = ()
if not isinstance(args[0] , lowerCAmelCase_):
lowerCamelCase_ : Optional[int] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowerCAmelCase_).base_version) >= version.parse(lowerCAmelCase_):
raise ValueError(
F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
F""" version {__version__} is >= {version_name}""")
lowerCamelCase_ : int = None
if isinstance(lowerCAmelCase_ , lowerCAmelCase_) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowerCAmelCase_),)
lowerCamelCase_ : Tuple = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(lowerCAmelCase_ , lowerCAmelCase_):
values += (getattr(lowerCAmelCase_ , lowerCAmelCase_),)
lowerCamelCase_ : Dict = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
lowerCamelCase_ : str = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
lowerCamelCase_ : Tuple = warning + " " if standard_warn else ""
warnings.warn(warning + message , lowerCAmelCase_ , stacklevel=lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_) and len(lowerCAmelCase_) > 0:
lowerCamelCase_ : str = inspect.getouterframes(inspect.currentframe())[1]
lowerCamelCase_ : Optional[int] = call_frame.filename
lowerCamelCase_ : Dict = call_frame.lineno
lowerCamelCase_ : Optional[int] = call_frame.function
lowerCamelCase_ : Any = next(iter(deprecated_kwargs.items()))
raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""")
if len(lowerCAmelCase_) == 0:
return
elif len(lowerCAmelCase_) == 1:
return values[0]
return values
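# Added illustration (hedged): the core move of the helper above, reduced to a
# self-contained mini version; pop a deprecated kwarg, warn, and hand back its value.
def _take_deprecated(kwargs, name, removed_in, message=""):
    if name not in kwargs:
        return None
    warnings.warn(
        f"`{name}` is deprecated and will be removed in version {removed_in}. " + message,
        FutureWarning,
        stacklevel=2,
    )
    return kwargs.pop(name)

# kw = {"size": 256}
# _take_deprecated(kw, "size", "1.0.0", "Use `new_size` instead.") -> 256, and kw is
# left empty so the deprecated key is not forwarded any further.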
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
# setable values
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _UpperCamelCase ( cls ):
return cls()
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : KarrasVeSchedulerState
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
@property
def _UpperCamelCase ( self ):
return True
@register_to_config
def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
pass
def _UpperCamelCase ( self ):
return KarrasVeSchedulerState.create()
def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
lowerCamelCase_ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
lowerCamelCase_ : List[str] = sigma + gamma * sigma
lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
raise NotImplementedError()
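# Added illustration (hedged): the schedule built in the set-timesteps method above;
# a geometric interpolation over squared sigmas, from sigma_max**2 at i=0 down to
# sigma_min**2 at i=n-1 (defaults mirror the config defaults in this file):
import numpy as np

def _karras_ve_schedule(n, sigma_min=0.02, sigma_max=100.0):
    i = np.arange(n)
    return sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1))

# _karras_ve_schedule(5)[0] == sigma_max**2; _karras_ve_schedule(5)[-1] == sigma_min**2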
| 73 | 0 |
import sys
import turtle
def get_mid(pa, pb):
    """Return the midpoint of two (x, y) points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa, vertexb, vertexc, depth):
    """Draw one triangle, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
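# Added illustration (hedged): a sanity check on the recursion above; each call draws
# one triangle and spawns three children until depth 0, so a run at depth d performs
# (3**(d+1) - 1) / 2 calls in total.
def _total_triangle_calls(depth):
    return (3 ** (depth + 1) - 1) // 2

# _total_triangle_calls(2) -> 13  (1 + 3 + 9)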
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
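# Added illustration (hedged): a minimal stand-in for what enable_full_determinism
# arranges (the real helper also seeds CUDA and sets workspace env vars); defined
# here only as a sketch and intentionally never called:
def _naive_full_determinism(seed: int = 0) -> None:
    torch.manual_seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.use_deterministic_algorithms(True)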
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__UpperCAmelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : List[str] = frozenset([] )
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
lowerCamelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
lowerCamelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCamelCase_ : Dict = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Tuple = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator)
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy").images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator)
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy").images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
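# Note: DiffEdit is exercised in three stages above -- generate_mask(), invert(),
# and the final pipeline call that consumes mask_image and image_latents.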
| 73 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    """simple docstring"""

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
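# Example of the alignment rule tested above: with stage_names=["a", "b", "c"],
# out_indices=[0, 2] alone implies out_features=["a", "c"], and passing neither
# argument selects only the last stage ("c", index 2).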
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    """simple docstring"""

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 73 | 0 |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
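# Quick sanity check (hypothetical interpreter session):
#   >>> make_matrix(2)
#   [[1, 2], [3, 4]]
#   >>> rotate_90(make_matrix(2))
#   [[2, 4], [1, 3]]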
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """simple docstring"""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy())

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """simple docstring"""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True)
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 73 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """simple docstring"""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """simple docstring"""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    """simple docstring"""

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, output_type="pil", return_dict=True, **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
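# Usage sketch (the checkpoint name is an assumption; any unconditional latent
# diffusion checkpoint with a VQ-VAE should work):
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]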
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
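# The _LazyModule indirection defers importing tokenization_mluke (and therefore
# the optional sentencepiece dependency) until MLukeTokenizer is actually accessed.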
| 704 |
import re


def dna(dna: str) -> str:
    """simple docstring"""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
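# Sanity check (hypothetical interpreter session):
#   >>> dna("GCTA")
#   'CGAT'
#   >>> dna("GCTB")
#   Traceback (most recent call last):
#   ValueError: Invalid Strand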
| 73 | 0 |
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps, embedding_dim, freq_shift=1, min_timescale=1, max_timescale=1.0e4, flip_sin_to_cos=False, scale=1.0,
):
    """simple docstring"""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """simple docstring"""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """simple docstring"""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
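# Shape sketch: for a batch of 4 timesteps and an 8-dim embedding,
#   get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8).shape == (4, 8)
# (half the channels carry sin terms, the other half cos terms).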
| 705 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude, angle, radian_mode=False):
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps=10**-1):
    """simple docstring"""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
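# The check reduces to |sum_i (r_i x F_i)| < eps: for 2-D vectors, numpy's cross()
# returns the scalar z-component of each moment, so a near-zero sum means the net
# torque vanishes and the system is in rotational equilibrium.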
| 73 | 0 |
def mf_knapsack(i, wt, val, j):
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """simple docstring"""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """simple docstring"""
    # item i is part of an optimal subset exactly when leaving it out
    # would change the optimal value at (i, j)
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
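# Worked example: with w=6, wt=[4, 3, 2, 3], val=[3, 2, 4, 4], items 3 and 4
# (weights 2 and 3, values 4 and 4) use capacity 5 <= 6 for the optimal value 8.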
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding

        elif text is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
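# Usage sketch (checkpoint name is an assumption; any CLAP checkpoint should work):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a sound of a cat"], audios=audio_array,
#                      sampling_rate=48000, return_tensors="pt")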
| 73 | 0 |