"""
Project Euler Problem 87 (https://projecteuler.net/problem=87): count the numbers
below a limit that can be written as the sum of a prime square, a prime cube,
and a prime fourth power.
"""


def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24).
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuraton of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
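
# Usage sketch (illustrative, not part of the original file): composing a config
# from two sub-configs with the public `from_encoder_decoder_configs` classmethod.
# The ViT/GPT-2 pairing is an arbitrary choice for the example, assuming an
# installed `transformers` package.
if __name__ == "__main__":
    from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
    # The decoder sub-config is flipped into cross-attention decoder mode.
    print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True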
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = set(SCREAMING_SNAKE_CASE__ ), [start]
while stack:
_UpperCAmelCase = stack.pop()
explored.add(SCREAMING_SNAKE_CASE__ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(SCREAMING_SNAKE_CASE__ )
return explored
UpperCAmelCase__ = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
def decorator(lowercase ):
_UpperCAmelCase = getattr(lowerCamelCase_ ,"""handle_key""" ,[] )
handle += [key]
setattr(lowerCamelCase_ ,"""handle_key""" ,lowerCamelCase_ )
return func
return decorator
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
def decorator(lowercase ):
_UpperCAmelCase = getattr(lowerCamelCase_ ,"""handle_key""" ,[] )
handle += keys
setattr(lowerCamelCase_ ,"""handle_key""" ,lowerCamelCase_ )
return func
return decorator
class a ( _a ):
def __new__( cls : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = super().__new__(cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not hasattr(__lowerCAmelCase , """key_handler""" ):
setattr(__lowerCAmelCase , """key_handler""" , {} )
setattr(__lowerCAmelCase , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(__lowerCAmelCase , """handle_key""" , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def lowerCAmelCase_ ( cls : Optional[Any] ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(__lowerCAmelCase )
_UpperCAmelCase = cls.key_handler.get(__lowerCAmelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def __UpperCAmelCase ( cls ):
"""simple docstring"""
return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
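
# Usage sketch (illustrative, not part of the original file): `register` swaps in
# the `KeyHandler` metaclass so methods decorated with `mark`/`mark_multiple` are
# collected into `key_handler` and dispatched by `handle_input`. The demo class is
# hypothetical; the key is stored as an `ord` code to match the `ord(char)`
# conversion in `handle_input`, and running it assumes the module context above.
if __name__ == "__main__":

    @register
    class DemoMenu:
        @mark(ord("q"))
        def quit(cls):
            return "quit"

    # Pressing "q" during `DemoMenu.handle_input(DemoMenu)` would invoke `quit`.
    print(DemoMenu.key_handler)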
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
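
# Usage sketch (illustrative, not part of the original file): the decorator injects
# the current batch size as the first argument and halves it after OOM-like
# failures. The training function below is a stand-in; running it assumes the
# module context above.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        # Real code would size dataloaders/models from `batch_size`; raising a
        # "CUDA out of memory." RuntimeError here would retry at 64, 32, ...
        return f"trained with batch_size={batch_size}"

    print(train())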
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a ( datasets.BuilderConfig ):
_snake_case : Dict = None
def __UpperCAmelCase ( lowercase ,lowercase ,):
"""simple docstring"""
import pyspark
def generate_fn():
_UpperCAmelCase = df.select("""*""" ,pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_UpperCAmelCase = df_with_partition_id.select("""*""" ).where(f'''part_id = {partition_id}''' ).drop("""part_id""" )
_UpperCAmelCase = partition_df.collect()
_UpperCAmelCase = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class a ( _BaseExamplesIterable ):
def __init__( self : List[str] , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : Optional[int]=None , ):
_UpperCAmelCase = df
_UpperCAmelCase = partition_order or range(self.df.rdd.getNumPartitions() )
_UpperCAmelCase = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Any ):
yield from self.generate_examples_fn()
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : np.random.Generator ):
_UpperCAmelCase = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCamelCase__ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = self.split_shard_indices_by_worker(UpperCamelCase__ , UpperCamelCase__ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ )
@property
def lowerCAmelCase_ ( self : str ):
return len(self.partition_order )
class a ( datasets.DatasetBuilder ):
_snake_case : Optional[Any] = SparkConfig
def __init__( self : List[str] , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , **__lowerCAmelCase : List[str] , ):
import pyspark
_UpperCAmelCase = pyspark.sql.SparkSession.builder.getOrCreate()
_UpperCAmelCase = df
_UpperCAmelCase = working_dir
super().__init__(
cache_dir=UpperCamelCase__ , config_name=str(self.df.semanticHash() ) , **UpperCamelCase__ , )
def lowerCAmelCase_ ( self : List[str] ):
def create_cache_and_write_probe(__lowerCAmelCase : Any ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCamelCase__ )
_UpperCAmelCase = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCamelCase__ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_UpperCAmelCase = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def lowerCAmelCase_ ( self : Dict ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Any ):
import pyspark
def get_arrow_batch_size(__lowerCAmelCase : Tuple ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_UpperCAmelCase = self.df.count()
_UpperCAmelCase = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_UpperCAmelCase = (
self.df.limit(UpperCamelCase__ )
.repartition(1 )
.mapInArrow(UpperCamelCase__ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_UpperCAmelCase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_UpperCAmelCase = min(UpperCamelCase__ , int(approx_total_size / max_shard_size ) )
_UpperCAmelCase = self.df.repartition(UpperCamelCase__ )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int , ):
import pyspark
_UpperCAmelCase = ParquetWriter if file_format == """parquet""" else ArrowWriter
_UpperCAmelCase = os.path.join(self._working_dir , os.path.basename(UpperCamelCase__ ) ) if self._working_dir else fpath
_UpperCAmelCase = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_UpperCAmelCase = self.config.features
_UpperCAmelCase = self._writer_batch_size
_UpperCAmelCase = self._fs.storage_options
def write_arrow(__lowerCAmelCase : Any ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_UpperCAmelCase = pyspark.TaskContext().taskAttemptId()
_UpperCAmelCase = next(UpperCamelCase__ , UpperCamelCase__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
_UpperCAmelCase = 0
_UpperCAmelCase = writer_class(
features=UpperCamelCase__ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=UpperCamelCase__ , storage_options=UpperCamelCase__ , embed_local_files=UpperCamelCase__ , )
_UpperCAmelCase = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCamelCase__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
_UpperCAmelCase = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=UpperCamelCase__ , storage_options=UpperCamelCase__ , embed_local_files=UpperCamelCase__ , )
_UpperCAmelCase = pa.Table.from_batches([batch] )
writer.write_table(UpperCamelCase__ )
if writer._num_bytes > 0:
_UpperCAmelCase , _UpperCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCamelCase__ ) ):
_UpperCAmelCase = os.path.join(os.path.dirname(UpperCamelCase__ ) , os.path.basename(UpperCamelCase__ ) )
shutil.move(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = (
self.df.mapInArrow(UpperCamelCase__ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : "datasets.SplitGenerator" , __lowerCAmelCase : str = "arrow" , __lowerCAmelCase : Optional[Union[str, int]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : List[str] , ):
self._validate_cache_dir()
_UpperCAmelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCamelCase__ )
_UpperCAmelCase = not is_remote_filesystem(self._fs )
_UpperCAmelCase = os.path.join if is_local else posixpath.join
_UpperCAmelCase = """-TTTTT-SSSSS-of-NNNNN"""
_UpperCAmelCase = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
_UpperCAmelCase = path_join(self._output_dir , UpperCamelCase__ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = []
_UpperCAmelCase = []
for task_id, content in self._prepare_split_single(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCamelCase__ )
_UpperCAmelCase = total_num_examples
_UpperCAmelCase = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
_UpperCAmelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_UpperCAmelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
rename(
UpperCamelCase__ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
_UpperCAmelCase = []
_UpperCAmelCase = 0
for i in range(len(UpperCamelCase__ ) ):
_UpperCAmelCase , _UpperCAmelCase = task_id_and_num_shards[i]
for shard_id in range(UpperCamelCase__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCamelCase__ , len(UpperCamelCase__ ) ).map(lambda __lowerCAmelCase : _rename_shard(*UpperCamelCase__ ) ).collect()
else:
# don't use any pattern
_UpperCAmelCase = 0
_UpperCAmelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(UpperCamelCase__ , """""" ) , )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
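
# Usage sketch (illustrative, not part of the original file): this builder backs
# `datasets.Dataset.from_spark`, which materializes a Spark DataFrame into a cached
# Arrow dataset. Running it assumes a working pyspark installation and a recent
# `datasets` release that ships the `from_spark` API.
if __name__ == "__main__":
    import pyspark

    spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], ["text"])
    ds = datasets.Dataset.from_spark(df)
    print(ds)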
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(snake_case_ ) * abs(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class a ( _a ):
_snake_case : Dict = """ctrl"""
_snake_case : int = ["""past_key_values"""]
_snake_case : Tuple = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , __lowerCAmelCase : Tuple=24_6534 , __lowerCAmelCase : str=256 , __lowerCAmelCase : Optional[int]=1280 , __lowerCAmelCase : List[Any]=8192 , __lowerCAmelCase : Tuple=48 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Any=1e-6 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Optional[int]=True , **__lowerCAmelCase : Optional[Any] , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = dff
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
super().__init__(**__lowerCAmelCase )
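
# Usage sketch (illustrative, not part of the original file): instantiating the
# default configuration and reading attributes through the `attribute_map`
# aliases defined above; assumes the module context of this file.
if __name__ == "__main__":
    config = CTRLConfig()
    # `hidden_size` resolves to `n_embd` and `num_hidden_layers` to `n_layer`.
    print(config.hidden_size, config.num_hidden_layers)  # 1280 48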
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = abs(__a )
_UpperCAmelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = abs(__a )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return sum(int(__a ) for c in str(abs(__a ) ) )
def __UpperCAmelCase ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase ,lowercase ) -> None:
_UpperCAmelCase = f'''{func.__name__}({value})'''
_UpperCAmelCase = timeit(f'''__main__.{call}''' ,setup="""import __main__""" )
print(f'''{call:56} = {func(__a )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__a ,__a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
assert isinstance(self.model , __lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_snake_case : Any = StableDiffusionInstructPixaPixPipeline
_snake_case : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_snake_case : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_snake_case : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCAmelCase = CLIPTextModel(__lowerCAmelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_UpperCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any]=0 ):
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" )
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = """french fries"""
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = [inputs["""prompt"""]] * 2
_UpperCAmelCase = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
_UpperCAmelCase = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.permute(0 , 3 , 1 , 2 )
_UpperCAmelCase = image.repeat(2 , 1 , 1 , 1 )
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_UpperCAmelCase = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = [round(__lowerCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(__lowerCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = VaeImageProcessor(do_resize=__lowerCAmelCase , do_normalize=__lowerCAmelCase )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = pipe(**self.get_dummy_inputs_by_type(__lowerCAmelCase , input_image_type="""pt""" ) )[0]
_UpperCAmelCase = components["""vae"""]
_UpperCAmelCase = self.get_dummy_inputs_by_type(__lowerCAmelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_UpperCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
_UpperCAmelCase = pipe(**__lowerCAmelCase )[0]
_UpperCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(__lowerCAmelCase , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str]=0 ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
_UpperCAmelCase = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase )
_UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase )
_UpperCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = 0
def callback_fn(__lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor ) -> None:
_UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_UpperCAmelCase = False
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
pipe(**__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase_ ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase = inputs["""image"""].resize((504, 504) )
_UpperCAmelCase = """timbrooks/instruct-pix2pix"""
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = pipe(**__lowerCAmelCase )
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
_UpperCAmelCase = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
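# A short sketch of what the `_LazyModule` pattern above buys us (commentary added
# here, not part of the original file): the module object is swapped for a lazy
# proxy, so attribute access is what triggers the heavy imports.
#
#     from transformers.models import git   # cheap: nothing from torch is loaded yet
#     model_cls = git.GitForCausalLM        # modeling_git is imported only now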
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
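# Usage sketch (added commentary; values follow directly from the formulas above,
# with R ~= 0.0821 L*atm/(mol*K) and results rounded to the nearest integer):
#
#     molarity_to_normality(2, 4, 8)                  # round((4 / 8) * 2)  -> 1
#     moles_to_pressure(0.82, 3, 300)                 # round(n*R*T / V)    -> 90
#     pressure_and_volume_to_temperature(0.82, 1, 2)  # round(P*V / (R*n))  -> 20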
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Create the list of (old, new) key pairs for renaming the timm state dict."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split the fused timm qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
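# Commentary (added, not in the original script): timm stores the attention
# projections as one fused matrix of shape (3 * hidden_size, hidden_size), so the
# slices above carve it into equal thirds -- query takes rows [0, hidden_size),
# key takes rows [hidden_size, 2 * hidden_size), and value the trailing rows.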
def remove_classification_head_(state_dict):
    """Drop the timm classification head from the state dict, in place."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move the value stored under ``old`` to the key ``new``."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Load the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights to our ViT hybrid structure."""
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
UpperCAmelCase__ = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins(root: TreeNode | None) -> int:
"""simple docstring"""
if root is None:
return 0
# Validation
    def count_nodes(node: TreeNode | None) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
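# Example (added commentary): for a root holding 3 coins with two empty leaves,
#
#     distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))  # -> 2
#
# one coin moves to each child, so every node ends up with exactly one coin.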
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Union[str, Any] , __lowerCAmelCase : Any=1_0000 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Any=2048 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : List[Any]=2048 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : str=0.0 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any="relu" , __lowerCAmelCase : str=256 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : str=True , __lowerCAmelCase : Any=1 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=6000 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=(5, 5) , __lowerCAmelCase : Any=1024 , __lowerCAmelCase : str=80 , __lowerCAmelCase : List[str]=1 , **__lowerCAmelCase : int , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = use_cache
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase = max_source_positions
_UpperCAmelCase = max_target_positions
_UpperCAmelCase = num_conv_layers
_UpperCAmelCase = list(a__ )
_UpperCAmelCase = conv_channels
_UpperCAmelCase = input_feat_per_channel
_UpperCAmelCase = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , **a__ , )
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point for the `accelerate` command line tool."""
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """Reverse the order of the words in the input string."""
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in ``n``."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
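# Commentary (added): this is the brute-force O(13 * n) window scan for Project
# Euler problem 8; for the embedded 1000-digit number, solution() evaluates to
# 23514624000. Any window containing a '0' digit collapses to zero, which is why
# a rolling product with division would need special handling.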
if __name__ == "__main__":
print(F'''{solution() = }''')
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)
    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e_00 and cp <= 0x9f_ff)
or (cp >= 0x34_00 and cp <= 0x4d_bf) #
or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) #
or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) #
or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) #
or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) #
or (cp >= 0xf9_00 and cp <= 0xfa_ff)
or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)
def __iter__( self : Union[str, Any] ):
return self
    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
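# Usage sketch (added; assumes a causal LM and its tokenizer are already loaded --
# the `model`/`tokenizer` names here are illustrative, not part of this file):
#
#     from threading import Thread
#
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     inputs = tokenizer("An increasing sequence: one,", return_tensors="pt")
#     thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
#     thread.start()
#     for new_text in streamer:  # yields decoded chunks as tokens arrive
#         print(new_text, end="")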
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
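# Commentary (added): `ratio_char_token` stores characters per token for each
# example; downstream filtering can use it to drop files the tokenizer encodes
# unusually inefficiently (a very low ratio often indicates non-natural content).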
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in ``num`` factorial."""
    return sum(map(int, str(factorial(num))))
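# Example (added): solution(10) == 27, since 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.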
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
        _UpperCAmelCase = max_2d_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
        _UpperCAmelCase = rel_2d_pos_bins
        _UpperCAmelCase = max_rel_2d_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
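# Commentary (added, not in the original config): the dummy batch built above only
# exists to trace the graph for ONNX export -- whenever an axis is dynamic (-1),
# a fixed size is substituted (2 samples for the batch, 8 tokens before adding
# special tokens) so the exporter cannot constant-fold a real dimension away.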
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
def __init__( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]=sys.maxsize ):
_UpperCAmelCase = """bilinear"""
_UpperCAmelCase = max_size
_UpperCAmelCase = short_edge_length
def __call__( self : List[str] , __lowerCAmelCase : str ):
_UpperCAmelCase = []
for img in imgs:
_UpperCAmelCase , _UpperCAmelCase = img.shape[:2]
# later: provide list and randomly choose index for resize
_UpperCAmelCase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_UpperCAmelCase = size * 1.0 / min(A__ , A__ )
if h < w:
_UpperCAmelCase , _UpperCAmelCase = size, scale * w
else:
_UpperCAmelCase , _UpperCAmelCase = scale * h, size
if max(A__ , A__ ) > self.max_size:
_UpperCAmelCase = self.max_size * 1.0 / max(A__ , A__ )
_UpperCAmelCase = newh * scale
_UpperCAmelCase = neww * scale
_UpperCAmelCase = int(neww + 0.5 )
_UpperCAmelCase = int(newh + 0.5 )
            if img.dtype == np.uint8:
_UpperCAmelCase = Image.fromarray(A__ )
_UpperCAmelCase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_UpperCAmelCase = np.asarray(A__ )
else:
_UpperCAmelCase = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_UpperCAmelCase = nn.functional.interpolate(
A__ , (newh, neww) , mode=self.interp_method , align_corners=A__ ).squeeze(0 )
img_augs.append(A__ )
return img_augs
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict ):
_UpperCAmelCase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_UpperCAmelCase = cfg.INPUT.FORMAT
_UpperCAmelCase = cfg.SIZE_DIVISIBILITY
_UpperCAmelCase = cfg.PAD_VALUE
_UpperCAmelCase = cfg.INPUT.MAX_SIZE_TEST
_UpperCAmelCase = cfg.MODEL.DEVICE
_UpperCAmelCase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_UpperCAmelCase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_UpperCAmelCase = lambda __lowerCAmelCase : (x - self.pixel_mean) / self.pixel_std
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = tuple(max(A__ ) for s in zip(*[img.shape for img in images] ) )
_UpperCAmelCase = [im.shape[-2:] for im in images]
_UpperCAmelCase = [
nn.functional.pad(
A__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(A__ , A__ )
]
return torch.stack(A__ ), torch.tensor(A__ )
def __call__( self : int , __lowerCAmelCase : str , __lowerCAmelCase : str=False ):
with torch.no_grad():
if not isinstance(A__ , A__ ):
_UpperCAmelCase = [images]
if single_image:
assert len(A__ ) == 1
for i in range(len(A__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(A__ , images.pop(A__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
A__ , torch.as_tensor(img_tensorize(images.pop(A__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_UpperCAmelCase = torch.tensor([im.shape[:2] for im in images] )
_UpperCAmelCase = self.aug(A__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_UpperCAmelCase = [self.normalizer(A__ ) for x in images]
# now pad them to do the following operations
_UpperCAmelCase , _UpperCAmelCase = self.pad(A__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_UpperCAmelCase = torch.true_divide(A__ , A__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    """Rescale box coordinates by the per-image (y, x) scale factors, in place."""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    """Clamp box coordinates to the image bounds, in place."""
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
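# Usage sketch (added; the box layout follows the functions above, with rows of
# (x1, y1, x2, y2) coordinates):
#
#     boxes = torch.tensor([[10.0, 20.0, 700.0, 500.0]])
#     _clip_box(boxes, (480, 640))  # clamps x into [0, 640] and y into [0, 480] in place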
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    """Helper for declaring dataclass list fields with a default value."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
def __init__( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=99 , __lowerCAmelCase : Union[str, Any]=32 , __lowerCAmelCase : Any=5 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=512 , __lowerCAmelCase : Optional[int]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Any=4 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
_snake_case : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""albert-base-v2""" )
_UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(a__ )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
@slow
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
_UpperCAmelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCAmelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase = model(a__ , attention_mask=a__ )[0]
_UpperCAmelCase = (1, 11, 768)
self.assertEqual(output.shape , a__ )
_UpperCAmelCase = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) )
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"
@property
    def test_path(self) -> str:
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    """Attach a SageMakerTestEnvironment to the requesting test class."""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
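
# Hedged usage sketch (comment-only so importing this module stays side-effect free).
# It only uses the `BioGptConfig` class defined above; no other names are assumed:
#   config = BioGptConfig(hidden_dropout_prob=0.2)   # override a single default
#   config.hidden_size              -> 1024
#   config.to_dict()["model_type"]  -> "biogpt"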
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term frequency and an inverse document frequency."""
    return round(tf * idf, 3)
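
# Hedged usage sketch of the four helpers above; the corpus and term are made up:
#   corpus = "this is the first document\nthis document is the second document"
#   tf = term_frequency("document", "this is the first document")   # -> 1
#   df, n = document_frequency("document", corpus)                  # -> (2, 2)
#   idf = inverse_document_frequency(df, n, smoothing=True)         # -> 1 + log10(2/3)
#   score = tf_idf(tf, idf)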
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # even if the dataset_name attribute is deprecated, `asdict` should still expose it
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy the weights of an old-structure prophetnet checkpoint into the current model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
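
# Example invocation of this script (both paths below are hypothetical):
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path ./xprophetnet_old_checkpoint \
#       --pytorch_dump_folder_path ./prophetnet_converted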
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the DFTs of A and B and recover A*B via the inverse DFT
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
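    # Hedged usage sketch: multiply (1 + 2x + 3x^2) by (3 + 4x) with the FFT class
    # above. The expected product is 3 + 10x + 17x^2 + 12x^3 (coefficients are
    # printed with complex rounding applied).
    print(FFT([1, 2, 3], [3, 4]))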
def gray_code(bit_count: int) -> list:
    """Return the `bit_count`-bit Gray code sequence as a list of integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the `bit_count`-bit Gray code sequence as a list of bit strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
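    # Usage sketch: the 2-bit reflected Gray code, computed by the functions above.
    print(gray_code(2))  # [0, 1, 3, 2]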
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
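
# Hedged usage sketch: build an UperNet config from a dict-style backbone config.
# The "convnext" key is only an assumption about what CONFIG_MAPPING contains:
#   config = UperNetConfig(backbone_config={"model_type": "convnext"}, use_auxiliary_head=False)
#   config.to_dict()["model_type"]  -> "upernet"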
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many outcomes of `dice_number` dice produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Returns the probability that Peter (nine four-sided dice) rolls a strictly
    higher total than Colin (six six-sided dice), rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)

    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
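    # Known-answer sanity check (Project Euler 205 publishes 0.5731441 as the answer);
    # recomputing solution() here is cheap enough to assert on directly.
    assert solution() == 0.5731441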
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
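
# Hedged usage sketch: a single forward pass through a small UNet1DModel on random
# data. The shapes below are illustrative assumptions, not values this file fixes:
#   model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#   noise = torch.randn(1, 2, 256)
#   out = model(noise, timestep=10).sample   # -> tensor of shape (1, 2, 256)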
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuraton of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
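
# Hedged usage sketch: wiring the per-component ONNX configs from a loaded
# vision-encoder-decoder config. `model_config` is a hypothetical instance of the
# composite config defined earlier in this file:
#   onnx_config = VisionEncoderDecoderOnnxConfig(model_config)
#   encoder_onnx = onnx_config.get_encoder_config(model_config.encoder)
#   decoder_onnx = onnx_config.get_decoder_config(model_config.encoder, model_config.decoder)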
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
@require_detectrona
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
@require_detectrona
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
@require_detectrona
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
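
# Hedged usage sketch mirroring the tests above; the model choice and the local
# file name are illustrative assumptions, not something this test module prescribes:
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image="invoice.png", question="What is the invoice number?", top_k=1)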
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
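
# Example invocation of this conversion script (the script name and dump path
# below are hypothetical; only --dump_path is required, --txt2img_unclip defaults
# to kakaobrain/karlo-v1-alpha):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation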
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
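
# Hedged usage sketch: preprocessing one RGB PIL image with the processor defined
# above (the class name matches this file; PIL and numpy are assumed installed):
#   from PIL import Image
#   import numpy as np
#   processor = EfficientFormerImageProcessor()
#   image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"].shape  -> (1, 3, 224, 224)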
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None` and then emptying the accelerator cache.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """
    Checks whether `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size( function = None ,starting_batch_size = 1_28 ):
    """simple docstring"""
    if function is None:
        return functools.partial(find_executable_batch_size ,starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args ,**kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
            raise TypeError(
                f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
                f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
        while True:
            if batch_size == 0:
                raise RuntimeError("""No executable batch size found, reached zero.""" )
            try:
                return function(batch_size ,*args ,**kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
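# Usage sketch for the decorator (a sketch, consistent with the body above): the
# wrapped function must take `batch_size` as its first argument; on every
# OOM-style failure recognized by should_reduce_batch_size the batch size halves.
#
# @find_executable_batch_size(starting_batch_size=1_28)
# def train(batch_size, model):
#     ...  # build the dataloader with `batch_size` and run one epoch
#
# train(model)  # do NOT pass batch_size yourself; the decorator injects it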
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCAmelCase__ = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCAmelCase__ = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCAmelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate( item ,main_target ):
    """simple docstring"""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover( parent_a ,parent_b ):
    """simple docstring"""
    random_slice = random.randint(0 ,len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate( child ,genes ):
    """simple docstring"""
    child_list = list(child )
    if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 ,len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select( parent_a ,population_score ,genes ,):
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 1_00 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 ,N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] ,parent_b )
        # Append new strings to the population list.
        pop.append(mutate(child_a ,genes ) )
        pop.append(mutate(child_b ,genes ) )
    return pop
def basic( target ,genes ,debug = True ):
    """simple docstring"""
    # Verify that N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("""""".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item ,target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score ,key=lambda x : x[1] ,reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] ,population_score ,genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also compute small strings in
            # far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
UpperCAmelCase__ = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
UpperCAmelCase__ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\"""
)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 368
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
                _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config ,
            input_ids ,
            token_type_ids ,
            input_mask ,
            sequence_labels ,
            token_labels ,
            choice_labels ,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
| 30
| 0
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase__ = re.compile(r"""^\s*else:""")
def find_backend( line ):
    """simple docstring"""
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( init_file ):
    """simple docstring"""
    with open(init_file ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("""_import_structure = {""" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r"""\[([^\]]+)\]""" ,content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(""" """ * 8 + """\"""" ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(""", """ )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(""", """ )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(""" """ * 8 + """\"""" ):
                    objects.append(line[9:-3] )
                elif line.startswith(""" """ * 12 + """\"""" ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("""else""" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
        elif line.startswith(""" """ * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects ,type_hint_objects ):
    """simple docstring"""
    def find_duplicates( sequence ):
        return [k for k, v in collections.Counter(sequence ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'''  {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'''  {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
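# A toy invocation (illustrative) showing the error format:
#
# analyze_results({"none": ["Foo"]}, {"none": ["Foo", "Bar"]})
# # -> ["Differences for base imports:",
# #     "  Bar in TYPE_HINT but not in _import_structure."]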
def check_all_inits( ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root ,"""__init__.py""" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("""\n""".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("""\n\n""".join(failures ) )
def get_transformers_submodules( ):
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep ,""".""" )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(""".py""" ,"""""" ).replace(os.path.sep ,""".""" )
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules( ):
    """simple docstring"""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        """transformers""" ,os.path.join(PATH_TO_TRANSFORMERS ,"""__init__.py""" ) ,submodule_search_locations=[PATH_TO_TRANSFORMERS] ,)
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(f'''- {module}''' for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
            f'''{list_of_modules}\n'''
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 369
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
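# A quick instantiation sketch (illustrative; in transformers this configuration
# class is registered under the "van" model type). The four parallel lists each
# describe one stage: stage 0 uses a 7x7 patch embedding with stride 4, 64
# hidden channels and depths[0] = 3 blocks.
#
# config = a()
# config.hidden_sizes  # [64, 128, 320, 512]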
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
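# Behavior sketch for the lazy init (a sketch of how _LazyModule, imported
# above, is meant to be used): importing the package is cheap, and the
# sentencepiece / tokenizers backends are only pulled in when an attribute is
# first touched.
#
# import transformers.models.nllb as nllb  # no heavy dependencies loaded yet
# tok_cls = nllb.NllbTokenizer             # first access triggers the real import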
| 370
|
"""simple docstring"""
def solution( power = 10_00 ):
    """simple docstring"""
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
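# Worked example (illustrative): solution(15) computes 2**15 = 32768 and
# returns the digit sum 3 + 2 + 7 + 6 + 8 = 26.
#
# assert solution(15) == 26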
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
| 0
|
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class a ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self ):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(image , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , image ).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
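# Usage sketch (illustrative; assumes a UNet-style model exposing
# .config.in_channels / .config.sample_size and a scheduler with .step): by
# construction the pipeline returns an all-ones tensor, which makes it a handy
# deterministic stand-in for tests.
#
# pipeline = a(unet=unet, scheduler=scheduler)
# out = pipeline()
# assert torch.allclose(out, torch.ones_like(out))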
| 371
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
        _UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
            # compute the usual loss via the model
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
        # If the PAD token is not defined, at least the EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
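# Shape sketch for _pad_tensors_to_max_len (illustrative values): a
# (batch, seq_len) tensor is right-padded with pad_token_id up to max_length.
#
# tensor = torch.tensor([[5, 6, 7]])                     # shape (1, 3)
# padded = trainer._pad_tensors_to_max_len(tensor, 5)    # shape (1, 5)
# # padded == [[5, 6, 7, pad_token_id, pad_token_id]]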
| 30
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class a ( lowerCAmelCase_ ):
_snake_case : Dict = 'time_series_transformer'
_snake_case : Optional[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : str = "student_t" , __lowerCAmelCase : str = "nll" , __lowerCAmelCase : int = 1 , __lowerCAmelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowerCAmelCase : Optional[Union[str, bool]] = "mean" , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : bool = True , __lowerCAmelCase : str = "gelu" , __lowerCAmelCase : int = 64 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : int = 100 , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : int=True , **__lowerCAmelCase : List[str] , ):
# time series specific configuration
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length or prediction_length
_UpperCAmelCase = distribution_output
_UpperCAmelCase = loss
_UpperCAmelCase = input_size
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = scaling
_UpperCAmelCase = num_dynamic_real_features
_UpperCAmelCase = num_static_real_features
_UpperCAmelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
_UpperCAmelCase = cardinality
else:
_UpperCAmelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
_UpperCAmelCase = embedding_dimension
else:
_UpperCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase = input_size * len(__lowerCAmelCase ) + self._number_of_features
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = use_cache
super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
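# Worked example for _number_of_features (illustrative numbers): with
# embedding_dimension=[2], num_dynamic_real_features=0, num_time_features=2,
# num_static_real_features=0 and input_size=1, the property returns
# 2 + 0 + 2 + 0 + 1 * 2 = 6, and the encoder input dimension becomes
# input_size * len(lags_sequence) + 6.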
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
def catalan_numbers( upper_limit ):
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i - j - 1)) for j = 0 .. i - 1
    for i in range(2 ,upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
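# Sanity check (illustrative): the first Catalan numbers are 1, 1, 2, 5, 14, 42,
# and each equals comb(2 * i, i) // (i + 1).
#
# from math import comb
# assert catalan_numbers(5) == [comb(2 * i, i) // (i + 1) for i in range(6)]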
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 351
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def create_rename_keys( config ,base_model=False ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict ,config ,base_model=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
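# Shape sketch for the split above (illustrative): timm stores the fused QKV
# projection as one (3 * hidden_size, hidden_size) matrix, so slicing rows
# [:h], [h:2h] and [-h:] recovers the query, key and value weights in order.
#
# in_proj_weight.shape                         # (3 * hidden_size, hidden_size)
# in_proj_weight[: config.hidden_size].shape   # (hidden_size, hidden_size) -> query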
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key( dct ,old ,new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name ,pytorch_dump_folder_path ,push_to_hub=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
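# Editor-added usage note: the --vit_name shown is the script default; the
# script filename and output path are hypothetical.
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-384 \
#       --push_to_hub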
| 30
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
UpperCAmelCase__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
_snake_case : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
_snake_case : Optional[str] = field(default=lowerCAmelCase_ , metadata={'help': 'A folder containing the training data.'} )
_snake_case : Optional[str] = field(default=lowerCAmelCase_ , metadata={'help': 'A folder containing the validation data.'} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
_snake_case : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
_snake_case : float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
_snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = {}
if self.train_dir is not None:
_UpperCAmelCase = self.train_dir
if self.validation_dir is not None:
_UpperCAmelCase = self.validation_dir
_UpperCAmelCase = data_files if data_files else None
@dataclass
class a :
_snake_case : str = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase_ )} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
_snake_case : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_snake_case : str = field(default=lowerCAmelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
_snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
_snake_case : Optional[int] = field(
default=lowerCAmelCase_ , metadata={'help': 'Stride to use for the encoder.'} , )
class a :
def __init__( self : Any , __lowerCAmelCase : Optional[Any]=192 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : List[str]=0.6 ):
_UpperCAmelCase = input_size
_UpperCAmelCase = mask_patch_size
_UpperCAmelCase = model_patch_size
_UpperCAmelCase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
_UpperCAmelCase = self.input_size // self.mask_patch_size
_UpperCAmelCase = self.mask_patch_size // self.model_patch_size
_UpperCAmelCase = self.rand_size**2
_UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : int ):
_UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count]
_UpperCAmelCase = np.zeros(self.token_count , dtype=__lowerCAmelCase )
_UpperCAmelCase = 1
_UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) )
_UpperCAmelCase = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
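# Editor-added sketch: exercising the mask generator above (instantiated as
# MaskGenerator in main() below) with its defaults. rand_size = 192 // 32 = 6,
# so ceil(36 * 0.6) = 22 mask patches are drawn, each upsampled by
# scale = 32 // 4 = 8 to an 8 x 8 block of model patches.
def _demo_mask_generator():
    generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = generator()
    assert mask.shape == ((192 // 4) ** 2,)  # one flag per model patch: 2304
    return mask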
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] )
_UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" ,lowercase ,lowercase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
_UpperCAmelCase = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,lowercase ) and data_args.train_val_split > 0.0:
_UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split )
_UpperCAmelCase = split["""train"""]
_UpperCAmelCase = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path ,**lowercase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path ,**lowercase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowercase ,"""decoder_type""" ):
_UpperCAmelCase = """simmim"""
# adapt config
_UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size
_UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
_UpperCAmelCase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
_UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name ,**lowercase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path ,**lowercase )
else:
_UpperCAmelCase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=lowercase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
logger.info("""Training new model from scratch""" )
_UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(lowercase )
if training_args.do_train:
_UpperCAmelCase = ds["""train"""].column_names
else:
_UpperCAmelCase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
_UpperCAmelCase = data_args.image_column_name
elif "image" in column_names:
_UpperCAmelCase = """image"""
elif "img" in column_names:
_UpperCAmelCase = """img"""
else:
_UpperCAmelCase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
_UpperCAmelCase = Compose(
[
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size ,scale=(0.67, 1.0) ,ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean ,std=image_processor.image_std ),
] )
# create mask generator
_UpperCAmelCase = MaskGenerator(
input_size=model_args.image_size ,mask_patch_size=data_args.mask_patch_size ,model_patch_size=model_args.patch_size ,mask_ratio=data_args.mask_ratio ,)
def preprocess_images(lowercase ):
_UpperCAmelCase = [transforms(lowercase ) for image in examples[image_column_name]]
_UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_UpperCAmelCase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase )
# Initialize our trainer
_UpperCAmelCase = Trainer(
model=lowercase ,args=lowercase ,train_dataset=ds["""train"""] if training_args.do_train else None ,eval_dataset=ds["""validation"""] if training_args.do_eval else None ,tokenizer=lowercase ,data_collator=lowercase ,)
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model()
trainer.log_metrics("""train""" ,train_result.metrics )
trainer.save_metrics("""train""" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase = trainer.evaluate()
trainer.log_metrics("""eval""" ,lowercase )
trainer.save_metrics("""eval""" ,lowercase )
# Write model card and (optionally) push to hub
_UpperCAmelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
if __name__ == "__main__":
main()
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
import numpy as np
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return np.where(vector > 0 ,lowercase ,(alpha * (np.exp(lowercase ) - 1)) )
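# Editor-added sketch: the helper above is the ELU activation,
# elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise.
def _demo_elu():
    x = np.array([1.0, 0.0, -1.0])
    out = np.where(x > 0, x, 1.0 * (np.exp(x) - 1))
    # -> [1.0, 0.0, exp(-1) - 1 ≈ -0.632]
    return out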
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase )
_UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=lowercase )
env_command_parser(subparsers=lowercase )
launch_command_parser(subparsers=lowercase )
tpu_command_parser(subparsers=lowercase )
test_command_parser(subparsers=lowercase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(lowercase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
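# Editor-added usage note: the subcommands registered above are dispatched as,
# for example:
#   accelerate config
#   accelerate env
#   accelerate launch train_script.py --your_args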
| 30
| 0
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
UpperCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase__ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase__ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase__ = re.compile(r"""^\s*else:""")
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if _re_test_backend.search(lowercase ) is None:
return None
_UpperCAmelCase = [b[0] for b in _re_backend.findall(lowercase )]
backends.sort()
return "_and_".join(lowercase )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
with open(lowercase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = 0
while line_index < len(lowercase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase ):
return None
# First grab the objects without a specific backend in _import_structure
_UpperCAmelCase = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
_UpperCAmelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase ):
_UpperCAmelCase = _re_one_line_import_struct.search(lowercase ).groups()[0]
_UpperCAmelCase = re.findall(R"""\[([^\]]+)\]""" ,lowercase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
_UpperCAmelCase = _re_import_struct_key_value.search(lowercase )
if single_line_import_search is not None:
_UpperCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowercase ) > 0]
objects.extend(lowercase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
_UpperCAmelCase = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_UpperCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
_UpperCAmelCase = lines[line_index]
if _re_import_struct_add_one.search(lowercase ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase ) is not None:
_UpperCAmelCase = _re_import_struct_add_many.search(lowercase ).groups()[0].split(""", """ )
_UpperCAmelCase = [obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_between_brackets.search(lowercase ) is not None:
_UpperCAmelCase = _re_between_brackets.search(lowercase ).groups()[0].split(""", """ )
_UpperCAmelCase = [obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_quote_object.search(lowercase ) is not None:
objects.append(_re_quote_object.search(lowercase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
_UpperCAmelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_UpperCAmelCase = []
while (
line_index < len(lowercase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
_UpperCAmelCase = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase ):
# If the line is an if is_backend_available, we grab all objects associated.
_UpperCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
_UpperCAmelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
def find_duplicates(lowercase ):
return [k for k, v in collections.Counter(lowercase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_UpperCAmelCase = []
for key in import_dict_objects.keys():
_UpperCAmelCase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_UpperCAmelCase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_UpperCAmelCase = """base imports""" if key == """none""" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
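# Editor-added sketch: analyze_results() above on a toy pair of init halves
# (object names are hypothetical). The torch halves disagree, so the check
# would report "APreTrainedModel in TYPE_HINT but not in _import_structure.".
def _demo_analyze_results():
    import_dict_objects = {"none": ["AConfig"], "torch": ["AModel"]}
    type_hint_objects = {"none": ["AConfig"], "torch": ["AModel", "APreTrainedModel"]}
    return import_dict_objects, type_hint_objects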
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = []
for root, _, files in os.walk(lowercase ):
if "__init__.py" in files:
_UpperCAmelCase = os.path.join(lowercase ,"""__init__.py""" )
_UpperCAmelCase = parse_init(lowercase )
if objects is not None:
_UpperCAmelCase = analyze_results(*lowercase )
if len(lowercase ) > 0:
_UpperCAmelCase = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(lowercase ) )
if len(lowercase ) > 0:
raise ValueError("""\n\n""".join(lowercase ) )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = []
for path, directories, files in os.walk(lowercase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(lowercase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
_UpperCAmelCase = str((Path(lowercase ) / folder).relative_to(lowercase ) )
_UpperCAmelCase = short_path.replace(os.path.sep ,""".""" )
submodules.append(lowercase )
for fname in files:
if fname == "__init__.py":
continue
_UpperCAmelCase = str((Path(lowercase ) / fname).relative_to(lowercase ) )
_UpperCAmelCase = short_path.replace(""".py""" ,"""""" ).replace(os.path.sep ,""".""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(lowercase )
return submodules
UpperCAmelCase__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def __UpperCAmelCase ( ):
"""simple docstring"""
from transformers.utils import direct_transformers_import
_UpperCAmelCase = direct_transformers_import(lowercase )
_UpperCAmelCase = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(lowercase ,"""__init__.py""" ) ,"""r""" ) as f:
_UpperCAmelCase = f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" ,lowercase ) ) )
_UpperCAmelCase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowercase ) > 0:
_UpperCAmelCase = """\n""".join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
f'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 354
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 355
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 30
| 0
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = 1.5
_UpperCAmelCase = int(factor * num_class_images )
_UpperCAmelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" ,indice_name="""laion_400m""" ,num_images=lowercase ,aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''' ,exist_ok=lowercase )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
_UpperCAmelCase = client.query(text=lowercase )
if len(lowercase ) >= factor * num_class_images or num_images > 1E4:
break
else:
_UpperCAmelCase = int(factor * num_images )
_UpperCAmelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" ,indice_name="""laion_400m""" ,num_images=lowercase ,aesthetic_weight=0.1 ,)
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = tqdm(desc="""downloading real regularization images""" ,total=lowercase )
with open(f'''{class_data_dir}/caption.txt''' ,"""w""" ) as fa, open(f'''{class_data_dir}/urls.txt''' ,"""w""" ) as fa, open(
f'''{class_data_dir}/images.txt''' ,"""w""" ) as fa:
while total < num_class_images:
_UpperCAmelCase = class_images[count]
count += 1
try:
_UpperCAmelCase = requests.get(images["""url"""] )
if img.status_code == 2_00:
_UpperCAmelCase = Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''' ,"""wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(f'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser("""""" ,add_help=lowercase )
parser.add_argument("""--class_prompt""" ,help="""text prompt to retrieve images""" ,required=lowercase ,type=lowercase )
parser.add_argument("""--class_data_dir""" ,help="""path to save images""" ,required=lowercase ,type=lowercase )
parser.add_argument("""--num_class_images""" ,help="""number of images to download""" ,default=2_00 ,type=lowercase )
return parser.parse_args()
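# Editor-added usage note (script name, prompt, and paths are hypothetical):
#   python retrieve_real_images.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./class_data --num_class_images 200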
if __name__ == "__main__":
UpperCAmelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 356
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
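# Editor-added note: the ratio computed in tokenize() above is characters per
# token, a rough measure of tokenizer compression. For a 120-character example
# that tokenizes into 40 ids: ratio = 120 / 40 = 3.0 characters per token.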
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 30
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
for param in module.parameters():
_UpperCAmelCase = False
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_UpperCAmelCase = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = plt.imshow(lowercase )
fig.axes.get_xaxis().set_visible(lowercase )
fig.axes.get_yaxis().set_visible(lowercase )
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = datetime.now()
_UpperCAmelCase = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 357
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
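# Editor-added note: compute_effective_axis_dimension() maps a dynamic axis
# (-1) to the fixed fallbacks (batch 2, sequence 8 plus special tokens) so the
# ONNX exporter does not specialize the graph on a degenerate size.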
| 30
| 0
|
"""simple docstring"""
import os
def __UpperCAmelCase ( lowercase = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(lowercase ) ,lowercase ) ) as input_file:
_UpperCAmelCase = [
[int(lowercase ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
_UpperCAmelCase = len(lowercase )
_UpperCAmelCase = len(matrix[0] )
_UpperCAmelCase = [[-1 for _ in range(lowercase )] for _ in range(lowercase )]
for i in range(lowercase ):
_UpperCAmelCase = matrix[i][0]
for j in range(1 ,lowercase ):
for i in range(lowercase ):
_UpperCAmelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 ,lowercase ):
_UpperCAmelCase = min(
minimal_path_sums[i][j] ,minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 ,-1 ,-1 ):
_UpperCAmelCase = min(
minimal_path_sums[i][j] ,minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
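# Editor-added sketch: the three relaxation passes above (left step, then
# top-down, then bottom-up per column) on [[1, 9], [5, 1]] yield 6: enter at 5
# in column 0 and step right to 1. A brute-force check over the simple paths:
assert min(1 + 9, 1 + 5 + 1, 5 + 1, 5 + 1 + 9) == 6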
if __name__ == "__main__":
print(F'''{solution() = }''')
| 358
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase , _UpperCAmelCase = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
_UpperCAmelCase = result + left + right
return input_list
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if len(lowercase ) <= 1:
return input_list
_UpperCAmelCase = list(lowercase )
# iteration for two-way merging
_UpperCAmelCase = 2
while p <= len(lowercase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 ,len(lowercase ) ,lowercase ):
_UpperCAmelCase = i
_UpperCAmelCase = i + p - 1
_UpperCAmelCase = (low + high + 1) // 2
_UpperCAmelCase = merge(lowercase ,lowercase ,lowercase ,lowercase )
# final merge of last two parts
if p * 2 >= len(lowercase ):
_UpperCAmelCase = i
_UpperCAmelCase = merge(lowercase ,0 ,lowercase ,len(lowercase ) - 1 )
break
p *= 2
return input_list
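# Editor-added sketch: the bottom-up merge sort above doubles the run length p
# each pass (sorted runs of 2, then 4, ...) until a final merge covers the
# whole list. Using the names referenced in the __main__ block below:
#   iter_merge_sort([5, 2, 9, 1, 3]) -> [1, 2, 3, 5, 9]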
if __name__ == "__main__":
UpperCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
UpperCAmelCase__ = []
else:
UpperCAmelCase__ = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 359
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
        return f'''{self.framework}-transformers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
| 30
| 0
|
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(__lowerCAmelCase ) for s in shape] )}.npy'''
def lowerCAmelCase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : int=(4, 4, 64, 64) , __lowerCAmelCase : Dict=False ):
_UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__lowerCAmelCase , __lowerCAmelCase ) ) , dtype=__lowerCAmelCase )
return image
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[str]="CompVis/stable-diffusion-v1-4" ):
_UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_UpperCAmelCase = """bf16""" if fpaa else None
_UpperCAmelCase , _UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__lowerCAmelCase , subfolder="""unet""" , dtype=__lowerCAmelCase , revision=__lowerCAmelCase )
return model, params
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str=0 , __lowerCAmelCase : Tuple=(4, 77, 768) , __lowerCAmelCase : Any=False ):
_UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
_UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__lowerCAmelCase , __lowerCAmelCase ) ) , dtype=__lowerCAmelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
_UpperCAmelCase , _UpperCAmelCase = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=__lowerCAmelCase )
_UpperCAmelCase = self.get_latents(__lowerCAmelCase , fpaa=__lowerCAmelCase )
_UpperCAmelCase = self.get_encoder_hidden_states(__lowerCAmelCase , fpaa=__lowerCAmelCase )
_UpperCAmelCase = model.apply(
{"""params""": params} , __lowerCAmelCase , jnp.array(__lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCAmelCase , ).sample
assert sample.shape == latents.shape
_UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_UpperCAmelCase = jnp.array(__lowerCAmelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : int ):
_UpperCAmelCase , _UpperCAmelCase = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=__lowerCAmelCase )
_UpperCAmelCase = self.get_latents(__lowerCAmelCase , shape=(4, 4, 96, 96) , fpaa=__lowerCAmelCase )
_UpperCAmelCase = self.get_encoder_hidden_states(__lowerCAmelCase , shape=(4, 77, 1024) , fpaa=__lowerCAmelCase )
_UpperCAmelCase = model.apply(
{"""params""": params} , __lowerCAmelCase , jnp.array(__lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCAmelCase , ).sample
assert sample.shape == latents.shape
_UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_UpperCAmelCase = jnp.array(__lowerCAmelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-2 )
| 360
|
"""simple docstring"""
import string
from math import logaa
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(lowercase ))
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) ,3 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return round(tf * idf ,3 )
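# Hedged usage sketch (not part of the original file): assuming the four
# obfuscated helpers above are, in order, term_frequency, document_frequency,
# inverse_document_frequency and tf_idf from the original module:
#
#     tf = term_frequency("to", "To be, or not to be")                 # -> 2
#     df, n = document_frequency("first", "First line.\nSecond line.") # -> (1, 2)
#     idf = inverse_document_frequency(df, n)                          # -> 0.301
#     score = tf_idf(tf, idf)                                          # -> 0.602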
| 30
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because it should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__lowerCAmelCase , )
assert hasattr(self , """env""" )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
# configuration for running training on smdistributed Model Parallel
_UpperCAmelCase = {
"""enabled""": True,
"""processes_per_host""": 8,
}
_UpperCAmelCase = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
_UpperCAmelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
_UpperCAmelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCAmelCase , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=__lowerCAmelCase , py_version="""py36""" , )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int ):
TrainingJobAnalytics(__lowerCAmelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] ):
# create estimator
_UpperCAmelCase = self.create_estimator(__lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
_UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __lowerCAmelCase )
| 361
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
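# Hedged usage sketch (not part of the original file); the script name,
# checkpoint path and output folder below are placeholders -- only the two
# flags come from the argparse setup above:
#
#     python convert_prophetnet_checkpoint.py \
#         --prophetnet_checkpoint_path <path-to-old-checkpoint> \
#         --pytorch_dump_folder_path <output-folder>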
| 30
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
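# Hedged usage sketch (not part of the original file): assuming the function
# above is `reverse_words`, split() collapses any run of whitespace, so
# reverse_words("hello  world again") -> 'again world hello'.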
| 362
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
        # Iteratively halve the column count (Cooley-Tukey butterfly passes)
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
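# Hedged usage sketch (not part of the original file): the class is obfuscated
# as `a` here (it is `FFT` in the original module); it multiplies two
# polynomials given as coefficient lists, lowest degree first:
#
#     product = a(poly_a=[1, 2], poly_b=[3, 4]).product
#     # (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2, so product ≈ [(3+0j), (10+0j), (8+0j)]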
| 30
| 0
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a ( unittest.TestCase ):
def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=7 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Dict=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , ):
_UpperCAmelCase = size if size is not None else {"""height""": 18, """width""": 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def lowerCAmelCase_ ( self : Dict ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Union[str, Any] = DPTImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = DPTImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase_ ( self : Union[str, Any] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase_ ( self : List[str] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 363
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
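# Hedged usage sketch (not part of the original file): assuming the class above
# is `UperNetConfig`, instantiating it with no backbone falls back to the
# default ResNet backbone, and to_dict() serializes that backbone recursively:
#
#     config = UperNetConfig()   # logs that a default ResNet backbone is used
#     config_dict = config.to_dict()
#     config_dict["backbone_config"]["model_type"]   # -> 'resnet'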
| 30
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if digit_amount > 0:
return round(number - int(lowercase ) ,lowercase )
return number - int(lowercase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 364
|
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
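# Hedged sanity check (not part of the original file): assuming the first
# function above is `total_frequency_distribution`, a tiny case is easy to
# verify by hand -- two 2-sided dice produce totals 2, 3, 4 with frequencies
# 1, 2, 1:
#
#     total_frequency_distribution(sides_number=2, dice_number=2)
#     # -> [0, 0, 1, 2, 1]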
| 30
| 0
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 365
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                f'''A configuration of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
    # Base case: a collection of zero or one elements is already sorted
if len(lowercase ) <= 1 or n <= 1:
return
insert_next(lowercase ,n - 1 )
rec_insertion_sort(lowercase ,n - 1 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# Checks order between adjacent elements
if index >= len(lowercase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
_UpperCAmelCase , _UpperCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(lowercase ,index + 1 )
if __name__ == "__main__":
UpperCAmelCase__ = input("""Enter integers separated by spaces: """)
UpperCAmelCase__ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 366
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""DeiTFeatureExtractor"""]
UpperCAmelCase__ = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 367
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
if not isinstance(lowercase ,lowercase ):
_UpperCAmelCase = list(lowercase )
for i in range(len(lowercase ) ):
_UpperCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
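# Hedged usage sketch (not part of the original file): assuming the function
# above is accelerate's `release_memory`, callers typically rebind their
# references to the returned Nones so the originals can be garbage-collected:
#
#     model, optimizer = release_memory(model, optimizer)
#     # both names are now None and the backing accelerator memory is freed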
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(lowercase ,starting_batch_size=lowercase )
_UpperCAmelCase = starting_batch_size
def decorator(*lowercase ,**lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() )
# Guard against user error
if len(lowercase ) < (len(lowercase ) + 1):
_UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowercase ,*lowercase ,**lowercase )
except Exception as e:
if should_reduce_batch_size(lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
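# Hedged usage sketch (not part of the original file): assuming the decorator
# above is accelerate's `find_executable_batch_size`, the wrapped function
# takes the batch size as its first parameter and must not receive it from the
# caller; on an out-of-memory error the decorator halves the batch size and
# retries:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model):
#         ...  # build dataloaders with `batch_size` and run training
#
#     train(model)  # batch_size is injected and shrunk automatically on OOM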
| 30
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : Any = ['pixel_values']
def __init__( self : Optional[int] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 255 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Tuple , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 224}
_UpperCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
_UpperCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_flip_channel_order
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PIL.Image.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Optional[Any] , ):
_UpperCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
_UpperCAmelCase = get_resize_output_image_size(__lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCAmelCase )
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : int , ):
_UpperCAmelCase = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[int, float] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : str , ):
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None ):
return flip_channel_order(__lowerCAmelCase , data_format=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : float = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : Optional[Any] , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" )
_UpperCAmelCase = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_UpperCAmelCase = [self.flip_channel_order(image=__lowerCAmelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
_UpperCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Tuple] = None ):
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__lowerCAmelCase ):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(__lowerCAmelCase ) ):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__lowerCAmelCase )
_UpperCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__lowerCAmelCase )
else:
_UpperCAmelCase = logits.argmax(dim=1 )
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
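# Hedged usage sketch (not part of the original file): assuming the last
# method above is `post_process_semantic_segmentation` (as in the MobileViT
# image processor), it turns model logits into per-pixel class maps:
#
#     inputs = image_processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     maps = image_processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[image.size[::-1]]   # PIL size is (w, h)
#     )
#     # maps[0] is a (height, width) tensor of predicted class indices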
| 368
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class a ( unittest.TestCase ):
def __init__( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : List[Any]=18 , __lowerCAmelCase : Optional[int]=30 , __lowerCAmelCase : Optional[int]=400 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : int=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __lowerCAmelCase : Dict=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __lowerCAmelCase : Union[str, Any]=True , ):
_UpperCAmelCase = size if size is not None else {"""height""": 224, """width""": 224}
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_convert_rgb
def lowerCAmelCase_ ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : str=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_UpperCAmelCase = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
                    255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
_UpperCAmelCase = []
for i in range(self.batch_size ):
_UpperCAmelCase , _UpperCAmelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_UpperCAmelCase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
if torchify:
_UpperCAmelCase = [torch.from_numpy(__lowerCAmelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Any = ChineseCLIPImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=__lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_convert_rgb""" ) )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : int ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self : List[Any] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__lowerCAmelCase )
_UpperCAmelCase = 3
@property
def lowerCAmelCase_ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_convert_rgb""" ) )
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
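# Usage sketch (upstream this class is `VanConfig`; in this dump it is named `a`):
# as with other Hugging Face configs, keyword arguments override the defaults above.
#
#   config = a(image_size=256, depths=[3, 3, 12, 3])
#   assert config.model_type == "van"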
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
        _UpperCAmelCase = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
#
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
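# Worked example (sketch of the upstream intent; in this dump the class is named `a`
# and several assignments are style-mangled): multiplying A(x) = 1 + 2x by
# B(x) = 3 + 4x gives 3 + 10x + 8x^2, so
#
#   a([1, 2], [3, 4]).product
#
# should hold values numerically close to [3, 10, 8] (stored as complex numbers).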
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
"""simple docstring"""
_UpperCAmelCase = 2**power
_UpperCAmelCase = 0
while n:
_UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10
return r
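# Worked example: solution(15) computes 2**15 = 32768 and returns 3+2+7+6+8 = 26;
# the default, solution(1000), answers Project Euler problem 16.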
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class a ( lowerCAmelCase_ ):
def __init__( self : Union[str, Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : int ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Tuple=None ):
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if prompt is not None:
_UpperCAmelCase = prompt
if generate_kwargs is not None:
_UpperCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_UpperCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_UpperCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , __lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCAmelCase : Optional[int] ):
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=None ):
_UpperCAmelCase = load_image(__lowerCAmelCase )
if prompt is not None:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
_UpperCAmelCase = self.model.config.model_type
if model_type == "git":
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids
_UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
_UpperCAmelCase = torch.tensor(__lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , header_text=__lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(__lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
_UpperCAmelCase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_UpperCAmelCase = None
return model_inputs
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , __lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_UpperCAmelCase = None
if generate_kwargs is None:
_UpperCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_UpperCAmelCase = model_inputs.pop(self.model.main_input_name )
_UpperCAmelCase = self.model.generate(__lowerCAmelCase , **__lowerCAmelCase , **__lowerCAmelCase )
return model_outputs
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int ):
_UpperCAmelCase = []
for output_ids in model_outputs:
_UpperCAmelCase = {
"""generated_text""": self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , )
}
records.append(__lowerCAmelCase )
return records
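# Usage sketch (assumed; follows the standard `transformers.pipeline` pattern, and
# the checkpoint name is illustrative):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
#   captioner("cats.png")  # -> [{"generated_text": "..."}]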
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
assert isinstance(self.model , __lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
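            # The first group receives weight decay; biases and LayerNorm weights fall
            # into the second group with weight_decay=0.0, the standard BERT-style
            # parameter split.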
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
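# Example (sketch of the upstream intent): with pad_token_id=0, padding the (1, 3)
# tensor [[5, 6, 7]] to max_length=5 yields [[5, 6, 7, 0, 0]]: a pad-filled canvas
# of shape (batch, max_length) with the original tokens copied into its left columns.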
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : Optional[Any] = 'masked_bert'
def __init__( self : Dict , __lowerCAmelCase : Union[str, Any]=3_0522 , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : str=1e-1_2 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Optional[int]="topK" , __lowerCAmelCase : Dict="constant" , __lowerCAmelCase : List[Any]=0.0 , **__lowerCAmelCase : Tuple , ):
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = pruning_method
_UpperCAmelCase = mask_init
_UpperCAmelCase = mask_scale
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
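# Note: with `_LazyModule`, `from transformers.models.git import GitModel` defers the
# torch-backed import until the attribute is first accessed; only the names listed in
# the import structure above are resolvable.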
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
if isinstance(lowercase ,lowercase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowercase ) )
elif isinstance(lowercase ,(list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowercase ) )
elif isinstance(lowercase ,torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
for d in reversed(lowercase ):
idx.append(flat_idx % d )
_UpperCAmelCase = flat_idx // d
return tuple(reversed(lowercase ) )
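# Worked example (upstream intent): for dims (2, 3), flat index 5 maps to (1, 2),
# the last cell of a row-major 2x3 grid.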
@torch.jit.ignore
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase = None ,lowercase = None ,):
"""simple docstring"""
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowercase ) -> None:
_UpperCAmelCase = True
for i in range(len(lowercase ) ):
_UpperCAmelCase = -1 * (i + 1)
l[reversed_idx] &= tally
_UpperCAmelCase = l[reversed_idx]
if start_edges is None:
_UpperCAmelCase = [s == 0 for s in start]
reduce_edge_list(lowercase )
if end_edges is None:
_UpperCAmelCase = [e == (d - 1) for e, d in zip(lowercase ,lowercase )]
reduce_edge_list(lowercase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase ) == 0:
return [()]
elif len(lowercase ) == 1:
return [(slice(start[0] ,end[0] + 1 ),)]
_UpperCAmelCase = []
_UpperCAmelCase = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase ,lowercase ):
if s == e:
path_list.append(slice(lowercase ,s + 1 ) )
else:
break
_UpperCAmelCase = tuple(lowercase )
_UpperCAmelCase = len(lowercase )
# start == end, and we're done
if divergence_idx == len(lowercase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCAmelCase = start[divergence_idx]
return tuple(
path + (slice(lowercase ,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCAmelCase = end[divergence_idx]
return tuple(
path + (slice(lowercase ,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_UpperCAmelCase = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
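# Worked example (traced by hand): covering start=(0, 1) through end=(1, 2) of a
# tensor with dims (2, 4), i.e. flat indices 1..6, yields two slice tuples:
# (slice(0, 1), slice(1, 4)) for the tail of row 0 and (slice(1, 2), slice(0, 3))
# for the head of row 1; there is no contiguous middle block to take in one chunk.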
@torch.jit.ignore
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = t.shape[:no_batch_dims]
_UpperCAmelCase = list(_flat_idx_to_idx(lowercase ,lowercase ) )
# _get_minimal_slice_set is inclusive
_UpperCAmelCase = list(_flat_idx_to_idx(flat_end - 1 ,lowercase ) )
# Get an ordered list of slices to perform
_UpperCAmelCase = _get_minimal_slice_set(
lowercase ,lowercase ,lowercase ,)
_UpperCAmelCase = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase = False ,lowercase = None ,lowercase = False ,):
"""simple docstring"""
if not (len(lowercase ) > 0):
raise ValueError("""Must provide at least one input""" )
_UpperCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(lowercase )]
_UpperCAmelCase = tuple([max(lowercase ) for s in zip(*lowercase )] )
def _prep_inputs(lowercase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_UpperCAmelCase = t.reshape(-1 ,*t.shape[no_batch_dims:] )
else:
_UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_UpperCAmelCase = tensor_tree_map(_prep_inputs ,lowercase )
_UpperCAmelCase = None
if _out is not None:
        _UpperCAmelCase = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) ,_out )
_UpperCAmelCase = 1
for d in orig_batch_dims:
flat_batch_dim *= d
_UpperCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowercase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
_UpperCAmelCase = 0
_UpperCAmelCase = prepped_outputs
for _ in range(lowercase ):
# Chunk the input
if not low_mem:
_UpperCAmelCase = _select_chunk
else:
_UpperCAmelCase = partial(
_chunk_slice ,flat_start=lowercase ,flat_end=min(lowercase ,i + chunk_size ) ,no_batch_dims=len(lowercase ) ,)
_UpperCAmelCase = tensor_tree_map(lowercase ,lowercase )
# Run the layer on the chunk
_UpperCAmelCase = layer(**lowercase )
# Allocate space for the output
if out is None:
            _UpperCAmelCase = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) ,output_chunk )
# Put the chunk in its pre-allocated space
if isinstance(lowercase ,lowercase ):
def assign(lowercase ,lowercase ) -> None:
for k, v in da.items():
if isinstance(lowercase ,lowercase ):
assign(lowercase ,da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
_UpperCAmelCase = da[k]
assign(lowercase ,lowercase )
elif isinstance(lowercase ,lowercase ):
for xa, xa in zip(lowercase ,lowercase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
_UpperCAmelCase = xa
elif isinstance(lowercase ,torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_UpperCAmelCase = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
    _UpperCAmelCase = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) ,out )
return out
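# Usage sketch (hedged; upstream this helper is `chunk_layer`): with chunk_size=4 and
# a single batch dimension of 10 rows, the layer runs three times (4 + 4 + 2 rows)
# and the chunk outputs are stitched back into full-batch tensors, trading peak
# memory for extra forward passes.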
class a :
def __init__( self : str , __lowerCAmelCase : int = 512 , ):
_UpperCAmelCase = max_chunk_size
_UpperCAmelCase = None
_UpperCAmelCase = None
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Callable , __lowerCAmelCase : tuple , __lowerCAmelCase : int ):
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_UpperCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
_UpperCAmelCase = [c for c in candidates if c > min_chunk_size]
_UpperCAmelCase = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__lowerCAmelCase : int ) -> bool:
try:
with torch.no_grad():
fn(*__lowerCAmelCase , chunk_size=__lowerCAmelCase )
return True
except RuntimeError:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = len(__lowerCAmelCase ) - 1
while i > min_viable_chunk_size_index:
_UpperCAmelCase = test_chunk_size(candidates[i] )
if not viable:
_UpperCAmelCase = (min_viable_chunk_size_index + i) // 2
else:
_UpperCAmelCase = i
_UpperCAmelCase = (i + len(__lowerCAmelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
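    # Example: with max_chunk_size=512 and min_chunk_size=1, the candidate list is
    # [1, 2, 4, ..., 256, 516] (the last power of two gets +4 of headroom), and the
    # search above keeps the largest candidate whose trial run does not raise a
    # RuntimeError (typically an out-of-memory error).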
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Iterable , __lowerCAmelCase : Iterable ):
_UpperCAmelCase = True
for aa, aa in zip(__lowerCAmelCase , __lowerCAmelCase ):
assert type(__lowerCAmelCase ) == type(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(__lowerCAmelCase , __lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            _UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
            _UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(__lowerCAmelCase , __lowerCAmelCase )
else:
consistent &= aa == aa
return consistent
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Callable , __lowerCAmelCase : tuple , __lowerCAmelCase : int , ):
_UpperCAmelCase = True
        _UpperCAmelCase = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , __lowerCAmelCase , __lowerCAmelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__lowerCAmelCase )
_UpperCAmelCase = self._compare_arg_caches(self.cached_arg_data , __lowerCAmelCase )
else:
# Otherwise, we can reuse the precomputed value
_UpperCAmelCase = False
if not consistent:
_UpperCAmelCase = self._determine_favorable_chunk_size(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
_UpperCAmelCase = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(lowercase )
_UpperCAmelCase = val
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
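# Example invocation (sketch; the script filename is assumed):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384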
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
UpperCAmelCase__ = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (mirrors fairseq's Dictionary)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
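    # A minimal sketch of a typical run (paths are placeholders; the checkpoint
    # directory is assumed to contain checkpoint.pt, dict.txt and bpecodes, as
    # the function above verifies):
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path ./biogpt_dump --pytorch_dump_folder_path ./biogpt-hf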
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
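# With the lazy structure above in place, a downstream import such as
#   from transformers.models.trocr import TrOCRProcessor
# resolves through _LazyModule, so the torch-backed classes are only imported
# the first time they are actually accessed.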
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
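    # Illustrative usage (file names are hypothetical):
    #   python convert_ldm_original.py --checkpoint_path model.ckpt \
    #       --config_path config.yaml --output_path ./ldm-pipeline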
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
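# Once this module is installed as the `accelerate` console entry point,
# the dispatch above makes invocations such as
#   accelerate config
#   accelerate launch train.py
# route to the sub-command parsers registered in main().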
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
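    # Illustrative usage (paths are placeholders):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert/model.ckpt-best \
    #       --albert_config_file ./albert/albert_config.json \
    #       --pytorch_dump_path ./albert/pytorch_model.bin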
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of the words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase__ = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
UpperCAmelCase__ = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
UpperCAmelCase__ = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time, in seconds, each candidate program is allowed to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
UpperCAmelCase__ = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
UpperCAmelCase__ = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
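

# Worked example of the unbiased estimator above (numbers are illustrative):
# with n=5 samples per task and c=2 correct ones, pass@1 is
# 1 - C(n-c, 1) / C(n, 1) = 1 - 3/5 = 0.4, and the vectorised helper agrees:
#   >>> estimate_pass_at_k(np.array([5]), np.array([2]), 1)
#   array([0.4])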
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(observed) < len(reference):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
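

# The suite above runs with plain pytest; e.g. (the path is illustrative):
#   python -m pytest tests/test_data_loader.py -q
# Each BatchSamplerShard test checks how one sampler is split across 2 processes.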
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize a single example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
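
# Illustrative invocation (argument values are placeholders; the flags come from
# PretokenizationArguments):
#   python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean --num_workers 8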
import math
def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes; returns the list of primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """
    Returns the sum of all semidivisible numbers not exceeding the limit
    (Project Euler problem 234): a number N is semidivisible when exactly one
    of lps(N) and ups(N) divides it, where lps/ups are the largest prime below
    and the smallest prime above sqrt(N).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
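    # Quick sanity check for the sieve helper above (illustrative):
    #   prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]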
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
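

# Minimal sketch (names as defined above, not executed here): build a default
# config and inspect the dynamic-axis spec the ONNX export would use.
#   config = LayoutLMv3Config()
#   onnx_config = LayoutLMv3OnnxConfig(config, task="sequence-classification")
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'bbox', 'pixel_values']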
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Interleave the characters of two strings; leftover characters from the
    longer string are appended at the end.
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
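    # A couple of illustrative cases for the interleaving above:
    #   alternative_string_arrange("ABCD", "XY") -> 'AXBYCD'
    #   alternative_string_arrange("AB", "XYZ")  -> 'AXBYZ'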
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
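# Illustrative invocation (file names are hypothetical):
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file memory.png
# The csv is expected to carry `model`, `batch_size`, `sequence_length` and
# `result` columns, matching the reader in Plot.__init__ above.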
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = int(number**0.5 )
return number == sq * sq
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_UpperCAmelCase = x_den * y_den * z_den
_UpperCAmelCase = gcd(lowercase ,lowercase )
top //= hcf
bottom //= hcf
return top, bottom
def __UpperCAmelCase ( lowercase = 35 ):
"""simple docstring"""
_UpperCAmelCase = set()
_UpperCAmelCase = 42
_UpperCAmelCase = Fraction(0 )
_UpperCAmelCase = 42
for x_num in range(1 ,order + 1 ):
for x_den in range(x_num + 1 ,order + 1 ):
for y_num in range(1 ,order + 1 ):
for y_den in range(y_num + 1 ,order + 1 ):
# n=1
_UpperCAmelCase = x_num * y_den + x_den * y_num
_UpperCAmelCase = x_den * y_den
_UpperCAmelCase = gcd(lowercase ,lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
unique_s.add(lowercase )
# n=2
z_num = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
z_den = x_den * x_den * y_den * y_den
if is_sq(z_num ) and is_sq(z_den ):
z_num = int(sqrt(z_num ) )
z_den = int(sqrt(z_den ) )
hcf = gcd(z_num ,z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num ,x_den ,y_num ,y_den ,z_num ,z_den )
unique_s.add(fraction_sum )
# n=-1
z_num = x_num * y_num
z_den = x_den * y_num + x_num * y_den
hcf = gcd(z_num ,z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num ,x_den ,y_num ,y_den ,z_num ,z_den )
unique_s.add(fraction_sum )
# n=-2
z_num = x_num * x_num * y_num * y_num
z_den = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(z_num ) and is_sq(z_den ):
z_num = int(sqrt(z_num ) )
z_den = int(sqrt(z_den ) )
hcf = gcd(z_num ,z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num ,x_den ,y_num ,y_den ,z_num ,z_den )
unique_s.add(fraction_sum )
for num, den in unique_s:
total += Fraction(num ,den )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 359
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
framework: str
role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
distributed_hyperparameters = {**hyperparameters, 'max_steps': 10_00}
@property
def metric_definitions( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def base_job_name( self ):
return f'''{self.framework}-transformers-test'''
@property
def test_path( self ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def image_uri( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def sm_env( request ):
"""simple docstring"""
request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""ConditionalDetrFeatureExtractor"""]
UpperCAmelCase__ = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_conditional_detr"""] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
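# With this guard, importing the package only loads the config names by default;
# _LazyModule resolves everything listed in _import_structure on first attribute
# access, so the heavy torch/vision submodules stay unimported until used.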
| 360
|
"""simple docstring"""
import string
from math import log10
def term_frequency( term ,document ):
"""simple docstring"""
document_without_punctuation = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
tokenize_document = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term ,corpus ):
"""simple docstring"""
corpus_without_punctuation = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
docs = corpus_without_punctuation.split("""\n""" )
term = term.lower()
return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df ,n ,smoothing=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + log10(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(log10(n / df ) ,3 )
def tf_idf( tf ,idf ):
"""simple docstring"""
return round(tf * idf ,3 )
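# Usage sketch (illustrative values, not from a real corpus):
#   corpus = "this is the first document\nthis is the second document"
#   tf = term_frequency("document", "the first document")   # -> 1
#   df, n = document_frequency("document", corpus)          # -> (2, 2)
#   print(tf_idf(tf, inverse_document_frequency(df, n)))    # idf = log10(2/2) = 0.0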
| 30
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNet2DConditionModel,
UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Union[str, Any] = UnCLIPImageVariationPipeline
_snake_case : Optional[int] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
_snake_case : int = IMAGE_VARIATION_BATCH_PARAMS
_snake_case : Dict = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
_snake_case : Dict = False
@property
def text_embedder_hidden_size( self ):
return 32
@property
def time_input_dim( self ):
return 32
@property
def block_out_channels_0( self ):
return self.time_input_dim
@property
def time_embed_dim( self ):
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
return 100
@property
def dummy_tokenizer( self ):
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def dummy_text_encoder( self ):
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(config )
@property
def dummy_image_encoder( self ):
torch.manual_seed(0 )
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(config )
@property
def dummy_text_proj( self ):
torch.manual_seed(0 )
model_kwargs = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
model = UnCLIPTextProjModel(**model_kwargs )
return model
@property
def dummy_decoder( self ):
torch.manual_seed(0 )
model_kwargs = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
model = UNet2DConditionModel(**model_kwargs )
return model
@property
def dummy_super_res_kwargs( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def dummy_super_res_first( self ):
torch.manual_seed(0 )
model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def dummy_super_res_last( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def get_dummy_components( self ):
decoder = self.dummy_decoder
text_proj = self.dummy_text_proj
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
super_res_first = self.dummy_super_res_first
super_res_last = self.dummy_super_res_last
decoder_scheduler = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
super_res_scheduler = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0 , 1 )
input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def test_unclip_image_variation_input_tensor( self ):
device = """cpu"""
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
output = pipe(**pipeline_inputs )
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
image_from_tuple = pipe(
**tuple_pipeline_inputs , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def test_unclip_image_variation_input_image( self ):
device = """cpu"""
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
output = pipe(**pipeline_inputs )
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
image_from_tuple = pipe(
**tuple_pipeline_inputs , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def test_unclip_image_variation_input_list_images( self ):
device = """cpu"""
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
pipeline_inputs["""image"""] = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
output = pipe(**pipeline_inputs )
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
tuple_pipeline_inputs["""image"""] = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
image_from_tuple = pipe(
**tuple_pipeline_inputs , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
expected_slice = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def test_unclip_passed_image_embed( self ):
device = torch.device("""cpu""" )
class DummyScheduler:
init_noise_sigma = 1
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device=device ).manual_seed(0 )
dtype = pipe.decoder.dtype
batch_size = 1
shape = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
decoder_latents = pipe.prepare_latents(
shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
shape = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
super_res_latents = pipe.prepare_latents(
shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
img_out_1 = pipe(
**pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents ).images
pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
# Don't pass image, instead pass embedding
image = pipeline_inputs.pop("""image""" )
image_embeddings = pipe.image_encoder(image ).image_embeds
img_out_2 = pipe(
**pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents , image_embeddings=image_embeddings , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_1 - img_out_2 ).max() < 1e-4
@skip_mps
def test_attention_slicing_forward_pass( self ):
test_max_difference = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
expected_max_diff = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference , expected_max_diff=expected_max_diff )
@skip_mps
def test_inference_batch_single_identical( self ):
test_max_difference = torch_device == """cpu"""
relax_max_difference = True
additional_params_copy_to_batched_inputs = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
def test_inference_batch_consistent( self ):
additional_params_copy_to_batched_inputs = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
batch_sizes = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=batch_sizes , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs )
@skip_mps
def test_dict_tuple_outputs_equivalent( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def test_save_load_local( self ):
return super().test_save_load_local()
@skip_mps
def test_save_load_optional_components( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_unclip_image_variation_karlo( self ):
input_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
pipeline = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
output = pipeline(
input_image , generator=generator , output_type="""np""" , )
image = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(image , expected_image , 15 )
| 361
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path ,pytorch_dump_folder_path ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
model = prophet
old_model = prophet_old
else:
model = prophet.prophetnet
old_model = prophet_old.model
is_key_init = False
for attribute in attributes:
if attribute in mapping:
old_attribute = mapping[attribute]
if not hasattr(old_model ,old_attribute ) and len(old_attribute ) > 0:
old_attribute = attribute
elif hasattr(old_model ,attribute ):
old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
model.weight = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
is_key_init = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
model.bias = old_model.bias
logger.info(f'''{attribute} is initialized''' )
is_key_init = True
break
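# The old checkpoint stores query/key/value as one fused `in_proj` matrix of
# shape (3 * embed_dim, embed_dim); the branch below slices it back into the
# three separate projection weights and biases.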
elif attribute in special_keys and hasattr(old_model ,"""in_proj_weight""" ):
embed_dim = old_model.in_proj_weight.shape[0] // 3
param = getattr(model ,attribute )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
is_key_init = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
is_key_init = True
break
if attribute.isdigit():
model = model[int(attribute )]
old_model = old_model[int(attribute )]
else:
model = getattr(model ,attribute )
if old_attribute == "":
old_model = old_model
else:
if not hasattr(old_model ,old_attribute ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
old_model = getattr(old_model ,old_attribute )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 30
| 0
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase__ = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc( model_doc ):
"""simple docstring"""
counts = defaultdict(int )
for doc in model_doc:
counts[doc["local"]] += 1
duplicates = [key for key, value in counts.items() if value > 1]
new_doc = []
for duplicate_key in duplicates:
titles = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(titles ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(new_doc ,key=lambda s: s["title"].lower() )
def check_model_doc( overwrite=False ):
"""simple docstring"""
with open(PATH_TO_TOC ,encoding="""utf-8""" ) as f:
content = yaml.safe_load(f.read() )
# Get to the API doc
api_idx = 0
while content[api_idx]["title"] != "API":
api_idx += 1
api_doc = content[api_idx]["""sections"""]
# Then to the model doc
model_idx = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
model_doc = api_doc[model_idx]["""sections"""]
modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if """sections""" in section]
diff = False
for idx, modality_doc in modalities_docs:
old_modality_doc = modality_doc["""sections"""]
new_modality_doc = clean_model_doc_toc(old_modality_doc )
if old_modality_doc != new_modality_doc:
diff = True
if overwrite:
modality_doc["""sections"""] = new_modality_doc
if diff:
if overwrite:
api_doc[model_idx]["""sections"""] = model_doc
content[api_idx]["""sections"""] = api_doc
with open(PATH_TO_TOC ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(yaml.dump(content ,allow_unicode=True ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 362
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
def __init__( self , poly_a=None , poly_b=None ):
# Input as list
self.polyA = list(poly_a or [0] )[:]
self.polyB = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
self.len_A = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
self.len_B = len(self.polyB )
# Add 0 to make lengths equal a power of 2
self.c_max_length = int(
2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
self.product = self.__multiply()
def __dft( self , which ):
dft = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(dft ) <= 1:
return dft[0]
#
next_ncol = self.c_max_length // 2
while next_ncol > 0:
new_dft = [[] for i in range(next_ncol )]
root = self.root**next_ncol
# First half of next step
current_root = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
current_root = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
dft = new_dft
next_ncol = next_ncol // 2
return dft[0]
def __multiply( self ):
dft_a = self.__dft("""A""" )
dft_b = self.__dft("""B""" )
inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
next_ncol = 2
while next_ncol <= self.c_max_length:
new_inverse_c = [[] for i in range(next_ncol )]
root = self.root ** (next_ncol // 2)
current_root = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
inverce_c = new_inverse_c
next_ncol *= 2
# Unpack
inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
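# Usage sketch: multiply (1 + 2x + 3x^2) by (4 + 5x) with the radix-2 FFT class above.
#   fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
#   print(fft)   # prints A, B and A*B with one coefficient per power of x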
| 30
| 0
|
def knapsack( weights ,values ,number_of_items ,max_weight ,index ):
"""simple docstring"""
if index == number_of_items:
return 0
ans1 = 0
ans2 = 0
ans1 = knapsack(weights ,values ,number_of_items ,max_weight ,index + 1 )
if weights[index] <= max_weight:
ans2 = values[index] + knapsack(
weights ,values ,number_of_items ,max_weight - weights[index] ,index + 1 )
return max(ans1 ,ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
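# Usage sketch: with weights [1, 3, 4, 5] and values [1, 4, 5, 7], the best
# subset under max_weight=7 is the items weighing 3 and 4, worth 9:
#   knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 7, 0)   # -> 9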
| 363
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class UperNetConfig( PretrainedConfig ):
model_type = 'upernet'
def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
super().__init__(**kwargs )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
backbone_model_type = backbone_config.get("""model_type""" )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
self.backbone_config = backbone_config
self.hidden_size = hidden_size
self.initializer_range = initializer_range
self.pool_scales = pool_scales
self.use_auxiliary_head = use_auxiliary_head
self.auxiliary_loss_weight = auxiliary_loss_weight
self.auxiliary_in_channels = auxiliary_in_channels
self.auxiliary_channels = auxiliary_channels
self.auxiliary_num_convs = auxiliary_num_convs
self.auxiliary_concat_input = auxiliary_concat_input
self.loss_ignore_index = loss_ignore_index
def to_dict( self ):
output = copy.deepcopy(self.__dict__ )
output["""backbone_config"""] = self.backbone_config.to_dict()
output["""model_type"""] = self.__class__.model_type
return output
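# Usage sketch: instantiating with no arguments falls back to a default ResNet
# backbone (logged above), so the serialized config round-trips through to_dict():
#   config = UperNetConfig()
#   assert config.to_dict()["backbone_config"]["model_type"] == "resnet"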
| 30
| 0
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
return f'''{self.framework}-transfromers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
| 364
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution( sides_number ,dice_number ):
"""simple docstring"""
max_face_number = sides_number
max_total = max_face_number * dice_number
totals_frequencies = [0] * (max_total + 1)
min_face_number = 1
face_numbers = range(min_face_number ,max_face_number + 1 )
for dice_numbers in product(face_numbers ,repeat=dice_number ):
total = sum(dice_numbers )
totals_frequencies[total] += 1
return totals_frequencies
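# Example: for two 4-sided dice, totals_frequencies[5] == 4, because
# (1, 4), (2, 3), (3, 2) and (4, 1) all sum to 5:
#   total_frequency_distribution(sides_number=4, dice_number=2)[5]   # -> 4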
def solution():
"""simple docstring"""
peter_totals_frequencies = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
colin_totals_frequencies = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
peter_wins_count = 0
min_peter_total = 9
max_peter_total = 4 * 9
min_colin_total = 6
for peter_total in range(min_peter_total ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
total_games_number = (4**9) * (6**6)
peter_win_probability = peter_wins_count / total_games_number
rounded_peter_win_probability = round(peter_win_probability ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 30
| 0
|
def kth_permutation( k ,n ):
"""simple docstring"""
factorials = [1]
for i in range(2 ,n ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
permutation = []
elements = list(range(n ) )
# Find permutation
while factorials:
factorial = factorials.pop()
number , k = divmod(k ,factorial )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
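# Usage sketch, worked by hand from the factorial number system above:
#   kth_permutation(0, 5)    # -> [0, 1, 2, 3, 4]  (k=0 keeps the identity order)
#   kth_permutation(10, 4)   # -> [1, 3, 0, 2]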
| 365
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuraton of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
torch_onnx_minimum_version = version.parse('1.11' )
@property
def inputs( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def atol_for_validation( self ):
return 1e-4
@property
def outputs( self ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
@property
def inputs( self ):
common_inputs = OrderedDict()
common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class VisionEncoderDecoderOnnxConfig( OnnxConfig ):
@property
def inputs( self ):
pass
def get_encoder_config( self , encoder_config: PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
def get_decoder_config( self , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , feature: str = "default" ):
decoder_config.encoder_hidden_size = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 30
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
"""simple docstring"""
parser = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=False )
subparsers = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=subparsers )
env_command_parser(subparsers=subparsers )
launch_command_parser(subparsers=subparsers )
tpu_command_parser(subparsers=subparsers )
test_command_parser(subparsers=subparsers )
# Let's go
args = parser.parse_args()
if not hasattr(args ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(args )
if __name__ == "__main__":
main()
| 366
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
args = parser.parse_args()
txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
feature_extractor = CLIPImageProcessor()
image_encoder = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
img2img = UnCLIPImageVariationPipeline(
decoder=txt2img.decoder,
text_encoder=txt2img.text_encoder,
tokenizer=txt2img.tokenizer,
text_proj=txt2img.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txt2img.super_res_first,
super_res_last=txt2img.super_res_last,
decoder_scheduler=txt2img.decoder_scheduler,
super_res_scheduler=txt2img.super_res_scheduler,
)
img2img.save_pretrained(args.dump_path)
| 30
| 0
|
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCAmelCase__ = """src/diffusers"""
# Matches is_xxx_available()
UpperCAmelCase__ = re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
UpperCAmelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
UpperCAmelCase__ = """
{0} = None
"""
UpperCAmelCase__ = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
UpperCAmelCase__ = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend( line ):
"""simple docstring"""
backends = _re_backend.findall(line )
if len(backends ) == 0:
return None
return "_and_".join(backends )
def read_init():
"""simple docstring"""
with open(os.path.join(PATH_TO_DIFFUSERS ,"""__init__.py""" ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
lines = f.readlines()
# Get to the point we do the actual imports for type checking
line_index = 0
backend_specific_objects = {}
# Go through the end of the file
while line_index < len(lines ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
backend = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
objects = []
# Until we unindent, add backend objects to the list
while line_index < len(lines ) and len(lines[line_index] ) > 1:
line = lines[line_index]
single_line_import_search = _re_single_line_import.search(line )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(objects ) > 0:
backend_specific_objects[backend] = objects
else:
line_index += 1
return backend_specific_objects
def create_dummy_object( name ,backend_name ):
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(name )
elif name.islower():
return DUMMY_FUNCTION.format(name ,backend_name )
else:
return DUMMY_CLASS.format(name ,backend_name )
def create_dummy_files( backend_specific_objects=None ):
"""simple docstring"""
if backend_specific_objects is None:
backend_specific_objects = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
dummy_files = {}
for backend, objects in backend_specific_objects.items():
backend_name = """[""" + """, """.join(f'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
dummy_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(o ,backend_name ) for o in objects] )
dummy_files[backend] = dummy_file
return dummy_files
def check_dummies( overwrite=False ):
"""simple docstring"""
dummy_files = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
short_names = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
path = os.path.join(PATH_TO_DIFFUSERS ,"""utils""" )
dummy_file_paths = {
backend: os.path.join(path ,f'''dummy_{short_names.get(backend ,backend )}_objects.py''' )
for backend in dummy_files.keys()
}
actual_dummies = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(file_path ):
with open(file_path ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
actual_dummies[backend] = f.read()
else:
actual_dummies[backend] = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(backend ,backend )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
f'''diffusers.utils.dummy_{short_names.get(backend ,backend )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 367
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory( *objects ):
"""simple docstring"""
if not isinstance(objects ,list ):
objects = list(objects )
for i in range(len(objects ) ):
objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size( exception ):
"""simple docstring"""
_statements = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(exception ,RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def find_executable_batch_size( function = None ,starting_batch_size = 1_28 ):
"""simple docstring"""
if function is None:
return functools.partial(find_executable_batch_size ,starting_batch_size=starting_batch_size )
batch_size = starting_batch_size
def decorator(*args ,**kwargs ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
if len(params ) < (len(args ) + 1):
arg_str = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(batch_size ,*args ,**kwargs )
except Exception as e:
if should_reduce_batch_size(e ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
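# Usage sketch: the decorated function must accept the batch size as its first
# argument and is retried with the batch size halved after every OOM-like error:
#   @find_executable_batch_size(starting_batch_size=1_28)
#   def training_loop(batch_size):
#       ...
#   training_loop()   # note: called WITHOUT the batch size argument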
| 30
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 368
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict["""next_sentence_label"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.embedding_size = embedding_size
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = TFMobileBertModel(config=config )
inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
result = model(inputs )
inputs = [input_ids, input_mask]
result = model(inputs )
result = model(input_ids )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = TFMobileBertForMaskedLM(config=config )
inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """
    Computes the Knuth-Morris-Pratt prefix function: for each position i, the
    length of the longest proper prefix of input_string[: i + 1] that is also
    a suffix of it.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Returns the length of the longest border (prefix that is also a suffix)."""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
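
# Worked example (added for illustration; values hand-checked, not part of the
# original file): in "aabcdaabc" the border "aabc" is both a prefix and a
# suffix, so the final prefix-function value -- and longest_prefix -- is 4.
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4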
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu",
        initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0,
        dropout_rate=0.0, **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},  # Enable reading proxy env variables.
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Returns the sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
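

# Quick sanity check (added for illustration, hand-computed):
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26
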
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
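

# A shape sketch of how the projection model combines its inputs (added for
# illustration; all sizes here are made up):
#     proj = UnCLIPTextProjModel(time_embed_dim=512, cross_attention_dim=768)
#     hidden, time_emb = proj(
#         image_embeddings=torch.randn(2, 768),
#         prompt_embeds=torch.randn(2, 768),
#         text_encoder_hidden_states=torch.randn(2, 77, 768),
#         do_classifier_free_guidance=False,
#     )
#     # hidden:   (2, 77 + 4, 768) -- four extra context tokens prepended
#     # time_emb: (2, 512)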
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
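

# Illustration of the padding helper above (added for clarity; values made up):
# a batch [[5, 6, 7]] padded to max_length=5 with pad_token_id=0 becomes
# [[5, 6, 7, 0, 0]] -- a constant tensor is allocated first, then the original
# values are copied into its leading columns.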
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
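

# Worked example for the 0-1000 normalization above (hand-checked, added for
# illustration): normalize_box([10, 20, 30, 40], width=200, height=100)
# returns [50, 200, 150, 400] -- each coordinate is rescaled to a 0-1000 grid.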
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
"""simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
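

# Shape note for the split above (added for illustration): timm stores the
# fused qkv projection as a single (3 * hidden_size, hidden_size) matrix, so
# rows [0, hidden_size) are the query, [hidden_size, 2 * hidden_size) the key,
# and the last hidden_size rows the value.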
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solves for whichever of conductivity, electron concentration, or mobility is supplied as 0, given the other two."""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
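
# Worked example (added for illustration; the inputs are made up but roughly
# silicon-like): solving for conductivity from the other two quantities,
#     electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.135)
# returns ('conductivity', 2.162835), i.e. 0.135 * 1e20 * 1.6021e-19.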
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
if labels is not None:
# Shift so that tokens < n predict n
_UpperCAmelCase = hidden[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
_UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_UpperCAmelCase = labels != -100
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCAmelCase = labels.index_select(0 , __lowerCAmelCase ) - l_idx
_UpperCAmelCase = head_logprob.index_select(0 , __lowerCAmelCase )
_UpperCAmelCase = hidden.index_select(0 , __lowerCAmelCase )
else:
_UpperCAmelCase = hidden
if i == 0:
if labels is not None:
_UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob(self, hidden):
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCAmelCase = logprob_i
return out
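

# A minimal shape sketch (added for illustration; sizes are made up, and it
# assumes the standard upstream Transformer-XL behavior of this module):
#     crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
#     hidden = torch.randn(8, 32)         # 8 positions, 32-dim hidden states
#     log_probs = crit.log_prob(hidden)   # -> (8, 1000) log-probabilities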
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase )
_UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=lowercase )
env_command_parser(subparsers=lowercase )
launch_command_parser(subparsers=lowercase )
tpu_command_parser(subparsers=lowercase )
test_command_parser(subparsers=lowercase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(lowercase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""ConvNextFeatureExtractor"""]
UpperCAmelCase__ = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from math import pow, sqrt
def __UpperCAmelCase ( *lowercase ):
"""simple docstring"""
_UpperCAmelCase = len(lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(lowercase ,lowercase )
else ValueError("""Input Error: Molar mass values must greater than 0.""" )
)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(lowercase ,lowercase ,lowercase )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(lowercase ,lowercase ,lowercase )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a ,2 ) ,6 )
if validate(lowercase ,lowercase ,lowercase )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a ,2 ) / molar_mass ,6 )
if validate(lowercase ,lowercase ,lowercase )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
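# A minimal standalone sanity check of Graham's law as used above; the helper name
# and gas values are illustrative only (not part of the original module). It reuses
# the `sqrt` imported at the top of the file: rate_1 / rate_2 = sqrt(M_2 / M_1).
def _effusion_ratio_example(light_molar_mass, heavy_molar_mass):
    # Lighter gases effuse faster, scaling with the inverse square root of molar mass.
    return round(sqrt(heavy_molar_mass / light_molar_mass), 6)

# Hydrogen (2.016 g/mol) effuses roughly four times faster than oxygen (31.998 g/mol).
assert abs(_effusion_ratio_example(2.016, 31.998) - 3.983971) < 1e-5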
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
"""simple docstring"""
import string
import numpy
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a ,lowercase )
class a :
_snake_case : List[Any] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    _snake_case : List[Any] = numpy.vectorize(lambda x : x % 36 )
    _snake_case : Union[str, Any] = numpy.vectorize(round )
def __init__( self : Union[str, Any] , __lowerCAmelCase : numpy.ndarray ):
_UpperCAmelCase = self.modulus(__lowerCAmelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_UpperCAmelCase = encrypt_key.shape[0]
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : str ):
return self.key_string.index(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
return self.key_string[round(__lowerCAmelCase )]
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_UpperCAmelCase = det % len(self.key_string )
_UpperCAmelCase = len(self.key_string )
if greatest_common_divisor(__lowerCAmelCase , len(self.key_string ) ) != 1:
_UpperCAmelCase = (
                f'''determinant modulo {req_l} of the encryption key ({det}) '''
                f'''is not coprime w.r.t. {req_l}.\nTry another key.'''
)
raise ValueError(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = [char for char in text.upper() if char in self.key_string]
_UpperCAmelCase = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(chars[-1] )
return "".join(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = self.process_text(text.upper() )
_UpperCAmelCase = """"""
for i in range(0 , len(__lowerCAmelCase ) - self.break_key + 1 , self.break_key ):
_UpperCAmelCase = text[i : i + self.break_key]
            _UpperCAmelCase = [self.replace_letters(char ) for char in batch]
_UpperCAmelCase = numpy.array([vec] ).T
_UpperCAmelCase = self.modulus(self.encrypt_key.dot(__lowerCAmelCase ) ).T.tolist()[
0
]
_UpperCAmelCase = """""".join(
self.replace_digits(__lowerCAmelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_UpperCAmelCase = det % len(self.key_string )
_UpperCAmelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
_UpperCAmelCase = i
break
_UpperCAmelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(__lowerCAmelCase ) )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str ):
_UpperCAmelCase = self.make_decrypt_key()
_UpperCAmelCase = self.process_text(text.upper() )
_UpperCAmelCase = """"""
for i in range(0 , len(__lowerCAmelCase ) - self.break_key + 1 , self.break_key ):
_UpperCAmelCase = text[i : i + self.break_key]
            _UpperCAmelCase = [self.replace_letters(char ) for char in batch]
_UpperCAmelCase = numpy.array([vec] ).T
_UpperCAmelCase = self.modulus(decrypt_key.dot(__lowerCAmelCase ) ).T.tolist()[0]
_UpperCAmelCase = """""".join(
self.replace_digits(__lowerCAmelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = int(input("""Enter the order of the encryption key: """ ) )
_UpperCAmelCase = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(lowercase ):
_UpperCAmelCase = [int(lowercase ) for x in input().split()]
hill_matrix.append(lowercase )
_UpperCAmelCase = HillCipher(numpy.array(lowercase ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
_UpperCAmelCase = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
_UpperCAmelCase = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(lowercase ) )
elif option == "2":
_UpperCAmelCase = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
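# Standalone sketch of the key-validity rule enforced by `check_determinant` above;
# the example key is hypothetical. A key matrix K is usable iff round(det(K)) mod 36
# is coprime with 36, which makes K invertible modulo the 36-character alphabet.
from math import gcd

_example_key = numpy.array([[2, 5], [1, 6]])  # det = 2 * 6 - 5 * 1 = 7
assert gcd(round(numpy.linalg.det(_example_key)) % 36, 36) == 1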
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""]
_UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'layoutlmv3'
def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase = max_ad_position_embeddings
_UpperCAmelCase = coordinate_size
_UpperCAmelCase = shape_size
_UpperCAmelCase = has_relative_attention_bias
_UpperCAmelCase = rel_pos_bins
_UpperCAmelCase = max_rel_pos
_UpperCAmelCase = has_spatial_attention_bias
_UpperCAmelCase = rel_ad_pos_bins
_UpperCAmelCase = max_rel_ad_pos
_UpperCAmelCase = text_embed
_UpperCAmelCase = visual_embed
_UpperCAmelCase = input_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = classifier_dropout
class a ( lowerCAmelCase_ ):
_snake_case : str = version.parse('1.12' )
@property
def lowerCAmelCase_ ( self : Dict ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 1e-5
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
_UpperCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 4_00_00_00 ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase , _UpperCAmelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(lowercase )
_UpperCAmelCase , _UpperCAmelCase = b, a + b
return sum(lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __UpperCAmelCase ( lowercase=None ,lowercase=None ):
"""simple docstring"""
return field(default_factory=lambda: default ,metadata=lowercase )
@dataclass
class a :
_snake_case : str = field(
metadata={'help': 'The csv file to plot.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , )
_snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
_snake_case : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
_snake_case : Optional[List[str]] = list_field(
default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
int(lowercase )
return True
except ValueError:
return False
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
try:
float(lowercase )
return True
except ValueError:
return False
class a :
def __init__( self : int , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_UpperCAmelCase = csv.DictReader(__lowerCAmelCase )
for row in reader:
_UpperCAmelCase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_UpperCAmelCase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_UpperCAmelCase = float(row["""result"""] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage"""
_UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_UpperCAmelCase = self.result_dict[model_name]["""result"""]
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )]
plt.scatter(
__lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" )
title_str += f''' {label_model_name} vs.'''
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(__lowerCAmelCase )
plt.xlabel(__lowerCAmelCase )
plt.ylabel(__lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(lowercase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=lowercase )
plot.plot()
if __name__ == "__main__":
main()
"""simple docstring"""
import itertools
import math
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes other than 2 and 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = 2
while True:
if is_prime(lowercase ):
yield num
num += 1
def __UpperCAmelCase ( lowercase = 1_00_01 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,lowercase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCAmelCase__ = """us-east-1""" # defaults region
@dataclass
class a :
_snake_case : str
_snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_snake_case : List[Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
_snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase_ ( self : Dict ):
        return f'''{self.framework}-transformers-test'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
"""simple docstring"""
class a :
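    # A Fenwick (binary indexed) tree variant for range *maximum* queries: the two
    # static helpers navigate the implicit tree with bit tricks, while the instance
    # methods do point assignment and answer max queries over a half-open range.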
def __init__( self : Tuple , __lowerCAmelCase : int ):
_UpperCAmelCase = size
_UpperCAmelCase = [0] * size
_UpperCAmelCase = [0] * size
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : int ):
return index | (index + 1)
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : int ):
return (index & (index + 1)) - 1
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = value
while index < self.size:
_UpperCAmelCase = self.get_prev(__lowerCAmelCase ) + 1
if current_left_border == index:
_UpperCAmelCase = value
else:
_UpperCAmelCase = max(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = self.get_next(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
        right -= 1 # Because the right bound is exclusive
_UpperCAmelCase = 0
while left <= right:
_UpperCAmelCase = self.get_prev(__lowerCAmelCase )
if left <= current_left:
_UpperCAmelCase = max(__lowerCAmelCase , self.tree[right] )
_UpperCAmelCase = current_left
else:
_UpperCAmelCase = max(__lowerCAmelCase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import string
from math import log10
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(lowercase ))
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
        return round(1 + log10(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
    return round(log10(n / df ) ,3 )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
return round(tf * idf ,3 )
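# Worked example of the helpers above with hypothetical counts: a term occurring
# tf = 3 times in a document that appears in df = 1 of n = 10 corpus documents
# gives idf = log10(10 / 1) = 1.0 and a tf-idf score of 3.0.
_tf, _df, _n = 3, 1, 10
_idf = round(log10(_n / _df), 3)
assert round(_tf * _idf, 3) == 3.0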
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
UpperCAmelCase__ = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
UpperCAmelCase__ = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
UpperCAmelCase__ = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
else:
_UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase )
_UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
lowercase ,output_loading_info=lowercase )
_UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""]
_UpperCAmelCase = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
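    # Walk every key the new model reports as missing, renaming path segments via
    # the mapping above and copying tensors over from the old checkpoint; fused
    # in_proj tensors are split into separate query/key/value projections.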
for key in loading_info["missing_keys"]:
_UpperCAmelCase = key.split(""".""" )
if attributes[0] == "lm_head":
_UpperCAmelCase = prophet
_UpperCAmelCase = prophet_old
else:
_UpperCAmelCase = prophet.prophetnet
_UpperCAmelCase = prophet_old.model
_UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
_UpperCAmelCase = mapping[attribute]
if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0:
_UpperCAmelCase = attribute
elif hasattr(lowercase ,lowercase ):
_UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
_UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
_UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
_UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
_UpperCAmelCase = getattr(lowercase ,lowercase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_UpperCAmelCase = True
break
if attribute.isdigit():
_UpperCAmelCase = model[int(lowercase )]
_UpperCAmelCase = old_model[int(lowercase )]
else:
_UpperCAmelCase = getattr(lowercase ,lowercase )
if old_attribute == "":
_UpperCAmelCase = old_model
else:
if not hasattr(lowercase ,lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
_UpperCAmelCase = getattr(lowercase ,lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(lowercase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'char'
_snake_case : int = 'bpe'
_snake_case : Optional[Any] = 'wp'
UpperCAmelCase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = ['image_processor', 'char_tokenizer']
_snake_case : Tuple = 'ViTImageProcessor'
_snake_case : Any = 'MgpstrTokenizer'
def __init__( self : Optional[int] , __lowerCAmelCase : int=None , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowerCAmelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
_UpperCAmelCase = tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained("""gpt2""" )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self : Union[str, Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : Any ):
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
_UpperCAmelCase = self.char_tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Dict ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = sequences
_UpperCAmelCase = char_preds.size(0 )
_UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__lowerCAmelCase , """char""" )
_UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__lowerCAmelCase , """bpe""" )
_UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__lowerCAmelCase , """wp""" )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i in range(__lowerCAmelCase ):
_UpperCAmelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
_UpperCAmelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
_UpperCAmelCase = scores.index(max(__lowerCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_UpperCAmelCase = {}
_UpperCAmelCase = final_strs
_UpperCAmelCase = final_scores
_UpperCAmelCase = char_strs
_UpperCAmelCase = bpe_strs
_UpperCAmelCase = wp_strs
return out
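    # The helper below decodes a single prediction head (char / BPE / wordpiece):
    # it takes the top token at every step, truncates the string at that head's
    # EOS marker and reports the cumulative softmax probability as a confidence.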
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ):
if format == DecodeType.CHARACTER:
_UpperCAmelCase = self.char_decode
_UpperCAmelCase = 1
_UpperCAmelCase = """[s]"""
elif format == DecodeType.BPE:
_UpperCAmelCase = self.bpe_decode
_UpperCAmelCase = 2
_UpperCAmelCase = """#"""
elif format == DecodeType.WORDPIECE:
_UpperCAmelCase = self.wp_decode
_UpperCAmelCase = 102
_UpperCAmelCase = """[SEP]"""
else:
raise ValueError(f'''Format {format} is not supported.''' )
_UpperCAmelCase , _UpperCAmelCase = [], []
_UpperCAmelCase = pred_logits.size(0 )
_UpperCAmelCase = pred_logits.size(1 )
_UpperCAmelCase , _UpperCAmelCase = pred_logits.topk(1 , dim=-1 , largest=__lowerCAmelCase , sorted=__lowerCAmelCase )
_UpperCAmelCase = preds_index.view(-1 , __lowerCAmelCase )[:, 1:]
_UpperCAmelCase = decoder(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = torch.nn.functional.softmax(__lowerCAmelCase , dim=2 ).max(dim=2 )
_UpperCAmelCase = preds_max_prob[:, 1:]
for index in range(__lowerCAmelCase ):
_UpperCAmelCase = preds_str[index].find(__lowerCAmelCase )
_UpperCAmelCase = preds_str[index][:pred_eos]
_UpperCAmelCase = preds_index[index].cpu().tolist()
_UpperCAmelCase = pred_index.index(__lowerCAmelCase ) if eos_token in pred_index else -1
_UpperCAmelCase = preds_max_prob[index][: pred_eos_index + 1]
_UpperCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCAmelCase )
conf_scores.append(__lowerCAmelCase )
return dec_strs, conf_scores
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Dict ):
return self.bpe_tokenizer.batch_decode(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(__lowerCAmelCase )]
return decode_strs
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
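    # The discrete Fourier transform below is an iterative radix-2 Cooley-Tukey
    # scheme: each pass merges pairs of half-size transforms using powers of the
    # primitive root of unity (butterfly steps).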
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
#
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
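# Standalone cross-check of polynomial multiplication, independent of the class
# above; numpy stores the highest-degree coefficient first, so
# (2x + 1) * (4x + 3) = 8x^2 + 10x + 3 comes back as [8, 10, 3].
assert list(np.polymul([2, 1], [4, 3])) == [8, 10, 3]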
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = 10
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
_UpperCAmelCase , _UpperCAmelCase = process_story(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = """"""
_UpperCAmelCase , _UpperCAmelCase = process_story(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [] )
self.assertEqual(__lowerCAmelCase , [] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__lowerCAmelCase )
_UpperCAmelCase = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = ["""It was the best of times."""]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 0 ).numpy() , expected.numpy() )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 23 ).numpy() , expected.numpy() )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 1 ).numpy() , expected.numpy() )
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__lowerCAmelCase , __lowerCAmelCase )
np.testing.assert_array_equal(__lowerCAmelCase , __lowerCAmelCase )
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[str] = 'upernet'
def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ):
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = backbone_config.get("""model_type""" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(__lowerCAmelCase )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 1_00 ):
"""simple docstring"""
_UpperCAmelCase = (n * (n + 1) // 2) ** 2
_UpperCAmelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
"""simple docstring"""
from itertools import product
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
_UpperCAmelCase = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 3_2
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase = 16 ):
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase = DatasetDict(
{
"""train""": dataset["""train"""].select(lowercase ),
"""validation""": dataset["""train"""].select(lowercase ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""test"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader, test_dataloader
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
# Download the dataset
_UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" )
# Create our splits
_UpperCAmelCase = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["""lr"""]
_UpperCAmelCase = int(config["""num_epochs"""] )
_UpperCAmelCase = int(config["""seed"""] )
_UpperCAmelCase = int(config["""batch_size"""] )
_UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(lowercase )
# New Code #
# Create our folds:
_UpperCAmelCase = kfold.split(np.zeros(datasets["""train"""].num_rows ) ,datasets["""train"""]["""label"""] )
_UpperCAmelCase = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowercase ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_fold_dataloaders(
lowercase ,lowercase ,lowercase ,lowercase ,)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.loss
_UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,lowercase )
# New Code #
# We also run predictions on the test set at the very end
_UpperCAmelCase = []
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowercase ,dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_UpperCAmelCase = torch.cat(lowercase ,dim=0 )
_UpperCAmelCase = torch.stack(lowercase ,dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_UpperCAmelCase = metric.compute(predictions=lowercase ,references=lowercase )
accelerator.print("""Average test metrics from all folds:""" ,lowercase )
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" ,type=lowercase ,default=3 ,help="""The number of splits to perform across the dataset""" )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
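# Hedged illustration of the StratifiedKFold splitting used above, on toy
# labels (illustrative, not the GLUE/MRPC data): each fold preserves the
# class balance in both the train and validation index sets.
import numpy as np
from sklearn.model_selection import StratifiedKFold

toy_labels = np.array([0, 1] * 5)
for fold, (train_idxs, valid_idxs) in enumerate(
    StratifiedKFold(n_splits=5).split(np.zeros(len(toy_labels)), toy_labels)
):
    print(f"fold {fold}: train={train_idxs.tolist()} valid={valid_idxs.tolist()}")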
| 365
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default") -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
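# Hedged usage sketch for the composed config above via the transformers
# classes it corresponds to; the model ids below are illustrative only.
from transformers import AutoConfig, VisionEncoderDecoderConfig

encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
decoder_config = AutoConfig.from_pretrained("gpt2")
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.model_type)  # "vision-encoder-decoder"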
| 30
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a ( lowerCAmelCase_ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
_snake_case : Tuple = 'ssube/stable-diffusion-x4-upscaler-onnx'
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Any=0 ):
_UpperCAmelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(__lowerCAmelCase ) )
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_UpperCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class a ( unittest.TestCase ):
@property
def lowerCAmelCase_ ( self : str ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = ort.SessionOptions()
_UpperCAmelCase = False
return options
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_UpperCAmelCase = init_image.resize((128, 128) )
# using the PNDM scheduler by default
_UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = """A fantasy landscape, trending on artstation"""
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowerCAmelCase , output_type="""np""" , )
_UpperCAmelCase = output.images
_UpperCAmelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_UpperCAmelCase = init_image.resize((128, 128) )
_UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
_UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = """A fantasy landscape, trending on artstation"""
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowerCAmelCase , output_type="""np""" , )
_UpperCAmelCase = output.images
_UpperCAmelCase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
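# Hedged sketch of exercising the upscale pipeline tested above on CPU,
# mirroring get_dummy_inputs(); the checkpoint id comes from the test class
# and the remaining values are illustrative.
import random

import torch
from diffusers import OnnxStableDiffusionUpscalePipeline
from diffusers.utils import floats_tensor

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
images = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=floats_tensor((1, 3, 128, 128), rng=random.Random(0)),
    generator=torch.manual_seed(0),
    num_inference_steps=3,
    output_type="numpy",
).images
print(images.shape)  # 4x upscale: (1, 512, 512, 3)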
| 366
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
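# Hedged invocation sketch for the conversion script above; the script name
# and output path are illustrative assumptions:
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./karlo-image-variations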
| 30
| 0
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def solution():
    """Count the words in words.txt whose letter-value sum is a triangular
    number (Project Euler 42)."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    word_filename = os.path.join(script_directory, "words.txt")
    words = ""
    with open(word_filename) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
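    # Hedged word-value check: "SKY" scores 19 + 11 + 25 = 55 = T(10), so it
    # must appear among the triangular numbers precomputed above.
    assert sum(ord(x) - 64 for x in "SKY") in TRIANGULAR_NUMBERS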
| 367
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set the given objects to None and empty the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception):
    """Return True if the exception looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries `function` with a halved batch size whenever an
    out-of-memory-style failure is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    # Free what we can, halve the batch size, and try again.
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
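# Hedged usage sketch for find_executable_batch_size above; the failing
# threshold below is a toy stand-in for a real CUDA out-of-memory error.
@find_executable_batch_size(starting_batch_size=128)
def _toy_train(batch_size):
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(_toy_train())  # 32, after halving 128 -> 64 -> 32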
| 30
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
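# Hedged usage sketch for the zero-shot detection pipeline above via the
# transformers pipeline factory; checkpoint and image URL are illustrative.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
detections = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for det in detections:
    print(det["label"], round(det["score"], 3), det["box"])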
| 368
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Dict = False
_snake_case : List[str] = False
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ):
_UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
            _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase )
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = [1, 6, 3_0522]
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
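# Hedged sketch mirroring the slow integration test above; MobileBERT's
# hidden size is 512, hence the expected last_hidden_state shape.
import tensorflow as tf
from transformers import TFMobileBertModel

model = TFMobileBertModel.from_pretrained("google/mobilebert-uncased")
outputs = model(tf.constant([[0, 1, 2, 3, 4, 5]]))
print(outputs.last_hidden_state.shape)  # (1, 6, 512)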
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""LayoutLMv3FeatureExtractor"""]
UpperCAmelCase__ = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
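# Hedged illustration of the _LazyModule pattern above: submodules are only
# imported when one of their attributes is first accessed.
import sys
from transformers.models import layoutlmv3

_ = layoutlmv3.LayoutLMv3Config  # first access triggers the real import
print("transformers.models.layoutlmv3.configuration_layoutlmv3" in sys.modules)  # True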
| 369
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : int = 'van'
def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = strides
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_ratios
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = dropout_rate
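# Hedged usage sketch via the library class this (identifier-obfuscated)
# file mirrors, VanConfig in transformers; the printed values are the
# defaults visible in the signature above.
from transformers import VanConfig

van_config = VanConfig()
print(van_config.model_type, van_config.hidden_sizes)  # van [64, 128, 320, 512]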
| 30
| 0
|
"""simple docstring"""
import argparse
import os
import re
UpperCAmelCase__ = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase__ = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
UpperCAmelCase__ = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def __UpperCAmelCase ( lowercase ,lowercase = False ):
"""simple docstring"""
with open(lowercase ,"""r""" ,encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = content.split("""\n""" )
_UpperCAmelCase = []
_UpperCAmelCase = 0
while line_idx < len(lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_UpperCAmelCase = len(re.search(R"""^(\s*)\S""" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
_UpperCAmelCase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_UpperCAmelCase = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_UpperCAmelCase = sorted(lowercase ,key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowercase ) )
elif "\n".join(lowercase ) != content:
return True
def __UpperCAmelCase ( lowercase = False ):
"""simple docstring"""
_UpperCAmelCase = [os.path.join(lowercase ,lowercase ) for f in os.listdir(lowercase ) if f.endswith(""".py""" )]
_UpperCAmelCase = [sort_auto_mapping(lowercase ,overwrite=lowercase ) for fname in fnames]
if not overwrite and any(lowercase ):
_UpperCAmelCase = [f for f, d in zip(lowercase ,lowercase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix'''
""" this.""" )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCAmelCase__ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 370
|
"""simple docstring"""
def solution(power=1000):
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
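# Hedged check of the digit-sum routine above: 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26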
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
| 0
|
"""simple docstring"""
def power(base, exponent):
    """Recursively compute base**exponent for non-negative exponents."""
    return base * power(base, exponent - 1) if exponent else 1
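# Hedged check of the recursive power above (non-negative exponents only):
assert power(3, 4) == 81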
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
UpperCAmelCase__ = int(input("""Enter the base: """).strip())
UpperCAmelCase__ = int(input("""Enter the exponent: """).strip())
UpperCAmelCase__ = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
UpperCAmelCase__ = 1 / result
print(F'''{base} to the power of {exponent} is {result}''')
| 371
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
assert isinstance(self.model , __lowerCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
_UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Optional[int] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
_UpperCAmelCase = inputs.pop("""labels""" )
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ):
_UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
_UpperCAmelCase = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
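# Hedged sketch of the no-decay parameter grouping used in the optimizer
# setup above, applied to a toy module (the module and hyper-parameters are
# illustrative).
import torch
from torch.optim import AdamW

toy_model = torch.nn.Linear(4, 2)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in toy_model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in toy_model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = AdamW(optimizer_grouped_parameters, lr=3e-4)
print([len(g["params"]) for g in optimizer_grouped_parameters])  # [1, 1]: weight decays, bias does not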
| 30
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ : List[str] = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
"""
@dataclass
class a ( lowerCAmelCase_ ):
_snake_case : Union[PIL.Image.Image, np.ndarray]
class a ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , __lowerCAmelCase : PriorTransformer , __lowerCAmelCase : CLIPVisionModel , __lowerCAmelCase : CLIPImageProcessor , __lowerCAmelCase : HeunDiscreteScheduler , __lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=__lowerCAmelCase , image_encoder=__lowerCAmelCase , image_processor=__lowerCAmelCase , scheduler=__lowerCAmelCase , renderer=__lowerCAmelCase , )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int ):
if latents is None:
_UpperCAmelCase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_UpperCAmelCase = latents.to(__lowerCAmelCase )
_UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Optional[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_UpperCAmelCase = torch.device(f'''cuda:{gpu_id}''' )
_UpperCAmelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : Any ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase = torch.cat(__lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(__lowerCAmelCase , axis=0 )
if not isinstance(__lowerCAmelCase , torch.Tensor ):
_UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
_UpperCAmelCase = image.to(dtype=self.image_encoder.dtype , device=__lowerCAmelCase )
_UpperCAmelCase = self.image_encoder(__lowerCAmelCase )["""last_hidden_state"""]
_UpperCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_UpperCAmelCase = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : Any , __lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 25 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ):
if isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(__lowerCAmelCase , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_UpperCAmelCase = len(__lowerCAmelCase )
else:
raise ValueError(
f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowerCAmelCase )}''' )
_UpperCAmelCase = self._execution_device
_UpperCAmelCase = batch_size * num_images_per_prompt
_UpperCAmelCase = guidance_scale > 1.0
_UpperCAmelCase = self._encode_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# prior
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase )
_UpperCAmelCase = self.scheduler.timesteps
_UpperCAmelCase = self.prior.config.num_embeddings
_UpperCAmelCase = self.prior.config.embedding_dim
_UpperCAmelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
_UpperCAmelCase = latents.reshape(latents.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = self.prior(
__lowerCAmelCase , timestep=__lowerCAmelCase , proj_embedding=__lowerCAmelCase , ).predicted_image_embedding
# remove the variance
_UpperCAmelCase , _UpperCAmelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 )
_UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_UpperCAmelCase = self.scheduler.step(
__lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__lowerCAmelCase )
_UpperCAmelCase = []
for i, latent in enumerate(__lowerCAmelCase ):
_UpperCAmelCase = self.renderer.decode(
latent[None, :] , __lowerCAmelCase , size=__lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(__lowerCAmelCase )
_UpperCAmelCase = torch.stack(__lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported, not output_type={output_type}''' )
_UpperCAmelCase = images.cpu().numpy()
if output_type == "pil":
_UpperCAmelCase = [self.numpy_to_pil(__lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__lowerCAmelCase )
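# The guidance step inside the denoising loop combines an unconditional and a
# conditional prediction: guided = uncond + scale * (cond - uncond). A minimal
# sketch of just that step (names are illustrative, not the pipeline's API):
import torch

def classifier_free_guidance_demo(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks [unconditional, conditional] along the batch dimension
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)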
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
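# The block above follows the library's lazy-import pattern: under TYPE_CHECKING
# the real symbols are imported for static analysis, while at runtime the module
# is replaced by a _LazyModule that resolves attributes on first access.
# Illustrative effect (module path assumed):
#   import transformers.models.git as git   # cheap: torch not imported yet
#   git.GitForCausalLM                      # first attribute access loads modeling_git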
| 30
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'luke'
def __init__( self : Any , __lowerCAmelCase : str=5_0267 , __lowerCAmelCase : Dict=50_0000 , __lowerCAmelCase : List[str]=768 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : str=3072 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Dict=512 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : str=1e-1_2 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[Any]=2 , **__lowerCAmelCase : str , ):
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = entity_vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = entity_emb_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = use_entity_aware_attention
_UpperCAmelCase = classifier_dropout
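# Illustrative instantiation of the config above (keyword names follow the
# transformers LukeConfig; the values shown are its defaults):
#   config = LukeConfig(vocab_size=50267, entity_vocab_size=500000,
#                       hidden_size=768, entity_emb_size=256)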
| 351
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = """"""
else:
_UpperCAmelCase = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
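# The helper above splits timm's fused qkv projection into separate query, key
# and value slices. Compactly, with H = config.hidden_size:
#   q_w = in_proj_weight[:H, :];       q_b = in_proj_bias[:H]
#   k_w = in_proj_weight[H:2 * H, :];  k_b = in_proj_bias[H:2 * H]
#   v_w = in_proj_weight[-H:, :];      v_b = in_proj_bias[-H:]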
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = dct.pop(lowercase )
_UpperCAmelCase = val
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ):
"""simple docstring"""
_UpperCAmelCase = BitConfig(
global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,)
_UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 )
_UpperCAmelCase = False
# load original model from timm
_UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
_UpperCAmelCase = create_rename_keys(lowercase ,lowercase )
for src, dest in rename_keys:
rename_key(lowercase ,lowercase ,lowercase )
read_in_q_k_v(lowercase ,lowercase ,lowercase )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
_UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) )
_UpperCAmelCase = transform.transforms
_UpperCAmelCase = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
_UpperCAmelCase = ViTHybridImageProcessor(
do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = transform(lowercase ).unsqueeze(0 )
_UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase ,lowercase )
# verify logits
with torch.no_grad():
_UpperCAmelCase = model(lowercase )
_UpperCAmelCase = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase = timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 )
else:
_UpperCAmelCase = timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
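# Hypothetical invocation of this conversion script (the file name is assumed):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base \
#       --push_to_hub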
| 30
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
def count_of_possible_combinations(lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
lowercase ,lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_UpperCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item ,lowercase )
for item in array )
_UpperCAmelCase = answer
return answer
_UpperCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = [0] * (target + 1)
_UpperCAmelCase = 1
for i in range(1 ,target + 1 ):
for j in range(lowercase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
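# A short worked example for the bottom-up version above, with array = [1, 2, 5]
# and target = 5 (order matters, so this counts ordered sequences):
#   dp[0] = 1 (the empty combination)
#   dp[1] = dp[0]                 = 1
#   dp[2] = dp[1] + dp[0]         = 2
#   dp[3] = dp[2] + dp[1]         = 3
#   dp[4] = dp[3] + dp[2]         = 5
#   dp[5] = dp[4] + dp[3] + dp[0] = 9
# matching the printed result below.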
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = 3
UpperCAmelCase__ = 5
UpperCAmelCase__ = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
_snake_case : List[Any] = 'vision-encoder-decoder'
_snake_case : Optional[int] = True
def __init__( self : int , **__lowerCAmelCase : Any ):
super().__init__(**__lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuration of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase )
_UpperCAmelCase = True
@classmethod
def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1e-4
@property
def lowerCAmelCase_ ( self : Dict ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ):
import torch
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase = super().generate_dummy_inputs(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape
_UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
_UpperCAmelCase = dummy_input.pop("""input_ids""" )
_UpperCAmelCase = dummy_input.pop("""attention_mask""" )
_UpperCAmelCase = torch.zeros(__lowerCAmelCase )
return common_inputs
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ):
_UpperCAmelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
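# A sketch of the dummy inputs the decoder ONNX config assembles above
# (shapes illustrative, assuming batch=2, seq_length=8, encoder_hidden_size=768):
#   input_ids             -> LongTensor of shape (2, 8)
#   attention_mask        -> LongTensor of shape (2, 8)
#   encoder_hidden_states -> torch.zeros(2, 8, 768)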
| 353
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase )
_UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=lowercase )
env_command_parser(subparsers=lowercase )
launch_command_parser(subparsers=lowercase )
tpu_command_parser(subparsers=lowercase )
test_command_parser(subparsers=lowercase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(lowercase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
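# Typical invocations of the CLI assembled above (illustrative):
#   accelerate config                      # interactive configuration wizard
#   accelerate env                         # print environment information
#   accelerate launch train.py --lr 1e-4   # launch a script with the saved config
#   accelerate test                        # sanity-check the configuration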
| 30
| 0
|
"""simple docstring"""
class a :
def __init__( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None ):
_UpperCAmelCase = data
_UpperCAmelCase = previous
_UpperCAmelCase = next_node
def __str__( self : Optional[Any] ):
return f'''{self.data}'''
def lowerCAmelCase_ ( self : Any ):
return self.data
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.next
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.previous
class a :
def __init__( self : str , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = head
def __iter__( self : Dict ):
return self
def lowerCAmelCase_ ( self : Optional[int] ):
if not self.current:
raise StopIteration
else:
_UpperCAmelCase = self.current.get_data()
_UpperCAmelCase = self.current.get_next()
return value
class a :
def __init__( self : Tuple ):
_UpperCAmelCase = None # First node in list
_UpperCAmelCase = None # Last node in list
def __str__( self : Dict ):
_UpperCAmelCase = self.head
_UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
_UpperCAmelCase = current.get_next()
return " ".join(str(__lowerCAmelCase ) for node in nodes )
def __contains__( self : Optional[int] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
_UpperCAmelCase = current.get_next()
return False
def __iter__( self : str ):
return LinkedListIterator(self.head )
def lowerCAmelCase_ ( self : Any ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase_ ( self : str ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Node ):
if self.head is None:
_UpperCAmelCase = node
_UpperCAmelCase = node
else:
self.insert_before_node(self.head , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Node ):
if self.head is None:
self.set_head(__lowerCAmelCase )
else:
self.insert_after_node(self.tail , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = Node(__lowerCAmelCase )
if self.head is None:
self.set_head(__lowerCAmelCase )
else:
self.set_tail(__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ):
_UpperCAmelCase = node
_UpperCAmelCase = node.previous
if node.get_previous() is None:
_UpperCAmelCase = node_to_insert
else:
_UpperCAmelCase = node_to_insert
_UpperCAmelCase = node_to_insert
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ):
_UpperCAmelCase = node
_UpperCAmelCase = node.next
if node.get_next() is None:
_UpperCAmelCase = node_to_insert
else:
_UpperCAmelCase = node_to_insert
_UpperCAmelCase = node_to_insert
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = 1
_UpperCAmelCase = Node(__lowerCAmelCase )
_UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(__lowerCAmelCase , __lowerCAmelCase )
return
current_position += 1
_UpperCAmelCase = node.next
self.insert_after_node(self.tail , __lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
_UpperCAmelCase = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] ):
if (node := self.get_node(__lowerCAmelCase )) is not None:
if node == self.head:
_UpperCAmelCase = self.head.get_next()
if node == self.tail:
_UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(__lowerCAmelCase )
@staticmethod
def lowerCAmelCase_ ( __lowerCAmelCase : Node ):
if node.get_next():
_UpperCAmelCase = node.previous
if node.get_previous():
_UpperCAmelCase = node.next
_UpperCAmelCase = None
_UpperCAmelCase = None
def lowerCAmelCase_ ( self : Dict ):
return self.head is None
def __UpperCAmelCase ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
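# The list above supports head/tail insertion, positional insertion, search and
# deletion. A self-contained, simplified sketch of the same pattern (not the
# obfuscated classes above) showing the core pointer updates on append:
class DemoNode:
    def __init__(self, data):
        self.data = data
        self.prev = None
        self.next = None

class DemoDoublyLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def append(self, data):
        node = DemoNode(data)
        if self.tail is None:  # empty list: node becomes both head and tail
            self.head = self.tail = node
        else:  # link the new node after the current tail
            node.prev = self.tail
            self.tail.next = node
            self.tail = node

    def __iter__(self):
        current = self.head
        while current is not None:
            yield current.data
            current = current.next

# lst = DemoDoublyLinkedList(); lst.append(1); lst.append(2); list(lst) == [1, 2]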
| 354
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|