| code (string, 82 – 53.2k chars) | code_codestyle (int64, 0 – 721) | style_context (string, 91 – 41.9k chars) | style_context_codestyle (int64, 0 – 699) | label (int64, 0 – 1) |
|---|---|---|---|---|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang) -> None:
    """Generate and write the README.md model card for one translation direction."""
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation.\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
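# A quick smoke test for the generator above (hypothetical, not part of the original
# script): point it at a temporary directory and inspect the generated card.
#     import tempfile
#     tmp = Path(tempfile.mkdtemp())
#     write_model_card(tmp / "facebook" / "wmt19-en-de", src_lang="en", tgt_lang="de")
#     print((tmp / "facebook" / "wmt19-en-de" / "README.md").read_text(encoding="utf-8")[:60])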
| 150 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ :List[str] = logging.get_logger(__name__)
UpperCAmelCase__ :Union[str, Any] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
snake_case__ : str = 'altclip_text_model'
def __init__( self : List[Any] , A__ : Optional[int]=250002 , A__ : Any=1024 , A__ : List[Any]=24 , A__ : Dict=16 , A__ : Union[str, Any]=4096 , A__ : Union[str, Any]="gelu" , A__ : str=0.1 , A__ : int=0.1 , A__ : str=514 , A__ : Optional[int]=1 , A__ : Optional[Any]=0.02 , A__ : int=0.02 , A__ : Optional[Any]=1e-0_5 , A__ : int=1 , A__ : Optional[Any]=0 , A__ : Dict=2 , A__ : Optional[int]="absolute" , A__ : Optional[int]=True , A__ : List[str]=768 , **A__ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : Optional[Any] = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : Tuple = num_attention_heads
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : Dict = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : List[str] = max_position_embeddings
__lowerCamelCase : Optional[Any] = type_vocab_size
__lowerCamelCase : int = initializer_range
__lowerCamelCase : Optional[int] = initializer_factor
__lowerCamelCase : List[Any] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : str = use_cache
__lowerCamelCase : Optional[Any] = project_dim
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
snake_case__ : List[Any] = 'altclip_vision_model'
def __init__( self : Optional[int] , A__ : str=768 , A__ : str=3072 , A__ : str=512 , A__ : Optional[int]=12 , A__ : List[Any]=12 , A__ : Union[str, Any]=3 , A__ : Dict=224 , A__ : List[Any]=32 , A__ : List[Any]="quick_gelu" , A__ : Dict=1e-5 , A__ : List[str]=0.0 , A__ : Dict=0.02 , A__ : List[str]=1.0 , **A__ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**A__ )
__lowerCamelCase : Optional[int] = hidden_size
__lowerCamelCase : Optional[int] = intermediate_size
__lowerCamelCase : Optional[Any] = projection_dim
__lowerCamelCase : Union[str, Any] = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : str = num_channels
__lowerCamelCase : Any = patch_size
__lowerCamelCase : Any = image_size
__lowerCamelCase : Any = initializer_range
__lowerCamelCase : List[str] = initializer_factor
__lowerCamelCase : List[str] = attention_dropout
__lowerCamelCase : Any = layer_norm_eps
__lowerCamelCase : Any = hidden_act
@classmethod
def a_ ( cls : str , A__ : Union[str, os.PathLike] , **A__ : List[str] ):
"""simple docstring"""
cls._set_token_in_kwargs(A__ )
config_dict , kwargs = cls.get_config_dict(A__ , **A__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
config_dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(config_dict , **kwargs )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
snake_case__ : int = 'altclip'
snake_case__ : Dict = True
def __init__( self : Optional[Any] , A__ : Optional[Any]=None , A__ : Union[str, Any]=None , A__ : Union[str, Any]=768 , A__ : Tuple=2.6592 , **A__ : List[Any] ):
"""simple docstring"""
text_config_dict = kwargs.pop("""text_config_dict""" , None )
vision_config_dict = kwargs.pop("""vision_config_dict""" , None )
super().__init__(**A__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
text_config = {}
# This is the complete result when using `text_config_dict`.
_text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
f"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(message )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
vision_config = {}
# This is the complete result when using `vision_config_dict`.
_vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_vision_config_dict["""id2label"""] = {
str(key ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
f"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(message )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
text_config = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
vision_config = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
self.text_config = AltCLIPTextConfig(**text_config )
self.vision_config = AltCLIPVisionConfig(**vision_config )
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
@classmethod
def a_ ( cls : Optional[Any] , A__ : AltCLIPTextConfig , A__ : AltCLIPVisionConfig , **A__ : List[str] ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A__ )
def a_ ( self : Optional[int] ):
"""simple docstring"""
output = copy.deepcopy(self.__dict__ )
output["""text_config"""] = self.text_config.to_dict()
output["""vision_config"""] = self.vision_config.to_dict()
output["""model_type"""] = self.__class__.model_type
return output
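# Hedged usage sketch for the three config classes above. Upstream (transformers) they
# are AltCLIPTextConfig, AltCLIPVisionConfig and AltCLIPConfig; the composite config can
# be built from the two sub-configs via the classmethod defined above:
#     text_cfg = AltCLIPTextConfig(vocab_size=250002, hidden_size=1024)
#     vision_cfg = AltCLIPVisionConfig(image_size=224, patch_size=32)
#     cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert cfg.to_dict()["text_config"]["hidden_size"] == 1024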
| 150 | 1 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase__ =datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCamelCase__ ( datasets.BuilderConfig ):
a : Optional[datasets.Features] = None
def _generate_iterable_examples( df: "pyspark.sql.DataFrame" , partition_order: List[int] , ):
"""simple docstring"""
import pyspark
    def generate_fn():
        df_with_partition_id = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("""*""" ).where(f'''part_id = {partition_id}''' ).drop("""part_id""" )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1
    return generate_fn
class lowerCamelCase__ ( _BaseExamplesIterable ):
    def __init__( self : Dict , df: "pyspark.sql.DataFrame" , partition_order: Union[str, Any]=None , ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Optional[Any] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : np.random.Generator ):
'''simple docstring'''
__lowercase = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(A_ )
return SparkExamplesIterable(self.df , partition_order=A_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , A_ : int , A_ : int ):
'''simple docstring'''
__lowercase = self.split_shard_indices_by_worker(A_ , A_ )
return SparkExamplesIterable(self.df , partition_order=A_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
return len(self.partition_order )
class lowerCamelCase__ ( datasets.DatasetBuilder ):
a : List[Any] = SparkConfig
    def __init__( self : Any , df: "pyspark.sql.DataFrame" , cache_dir: str = None , working_dir: str = None , **config_kwargs , ):
        '''simple docstring'''
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
def create_cache_and_write_probe(A_ : Union[str, Any] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=A_ )
probe_file = os.path.join(self._cache_dir , """fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(A_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__lowercase = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(A_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[Any] ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(A_ : Optional[Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
__lowercase = self.df.count()
__lowercase = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__lowercase = (
self.df.limit(A_ )
.repartition(1 )
.mapInArrow(A_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__lowercase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__lowercase = min(A_ , int(approx_total_size / max_shard_size ) )
__lowercase = self.df.repartition(A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : str , A_ : str , A_ : int , ):
'''simple docstring'''
import pyspark
__lowercase = ParquetWriter if file_format == """parquet""" else ArrowWriter
__lowercase = os.path.join(self._working_dir , os.path.basename(A_ ) ) if self._working_dir else fpath
__lowercase = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__lowercase = self.config.features
__lowercase = self._writer_batch_size
__lowercase = self._fs.storage_options
def write_arrow(A_ : List[Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
task_id = pyspark.TaskContext().taskAttemptId()
first_batch = next(A_ , None )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
__lowercase = 0
__lowercase = writer_class(
features=A_ , path=working_fpath.replace("""SSSSS""" , F'''{shard_id:05d}''' ).replace("""TTTTT""" , F'''{task_id:05d}''' ) , writer_batch_size=A_ , storage_options=A_ , embed_local_files=A_ , )
__lowercase = pa.Table.from_batches([first_batch] )
writer.write_table(A_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__lowercase , __lowercase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
__lowercase = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , F'''{shard_id:05d}''' ).replace("""TTTTT""" , F'''{task_id:05d}''' ) , writer_batch_size=A_ , storage_options=A_ , embed_local_files=A_ , )
__lowercase = pa.Table.from_batches([batch] )
writer.write_table(A_ )
if writer._num_bytes > 0:
__lowercase , __lowercase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(A_ ) ):
__lowercase = os.path.join(os.path.dirname(A_ ) , os.path.basename(A_ ) )
shutil.move(A_ , A_ )
__lowercase = (
self.df.mapInArrow(A_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , A_ : "datasets.SplitGenerator" , A_ : str = "arrow" , A_ : Optional[Union[str, int]] = None , A_ : Optional[int] = None , **A_ : Union[str, Any] , ):
'''simple docstring'''
self._validate_cache_dir()
__lowercase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(A_ )
__lowercase = not is_remote_filesystem(self._fs )
__lowercase = os.path.join if is_local else posixpath.join
__lowercase = """-TTTTT-SSSSS-of-NNNNN"""
__lowercase = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
__lowercase = path_join(self._output_dir , A_ )
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = []
__lowercase = []
for task_id, content in self._prepare_split_single(A_ , A_ , A_ ):
(num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(A_ )
__lowercase = total_num_examples
__lowercase = total_num_bytes
# should rename everything at the end
logger.debug(F'''Renaming {total_shards} shards.''' )
if total_shards > 1:
__lowercase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__lowercase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
A_ : int , A_ : int , A_ : int , ):
rename(
A_ , fpath.replace("""SSSSS""" , F'''{shard_id:05d}''' ).replace("""TTTTT""" , F'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , F'''{global_shard_id:05d}''' ).replace("""NNNNN""" , F'''{total_shards:05d}''' ) , )
__lowercase = []
__lowercase = 0
for i in range(len(A_ ) ):
__lowercase , __lowercase = task_id_and_num_shards[i]
for shard_id in range(A_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(A_ , len(A_ ) ).map(lambda A_ : _rename_shard(*A_ ) ).collect()
else:
# don't use any pattern
__lowercase = 0
__lowercase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F'''{shard_id:05d}''' ).replace("""TTTTT""" , F'''{task_id:05d}''' ) , fpath.replace(A_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : int , A_ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
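# Minimal end-to-end sketch of how this builder is typically reached, assuming the
# public datasets.Dataset.from_spark entry point (not defined in this file):
#     import pyspark
#     from datasets import Dataset
#     spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("a", 0), ("b", 1)], schema="text string, label bigint")
#     ds = Dataset.from_spark(df)  # internally drives the builder and iterable above
#     print(ds[0])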
| 442 |
"""simple docstring"""
def longest_common_subsequence( x: str , y: str ):
    """
    Finds the longest common subsequence between two strings.
    """
    assert x is not None
    assert y is not None
    m = len(x )
    n = len(y )
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
    seq = ""
    i , j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
UpperCAmelCase__ ="AGGTAB"
UpperCAmelCase__ ="GXTXAYB"
UpperCAmelCase__ =4
UpperCAmelCase__ ="GTAB"
UpperCAmelCase__ , UpperCAmelCase__ =longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
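# Doctest-style illustration of the DP above (tie-breaking may yield "BCAB" or "BDAB",
# both valid subsequences of maximal length):
#     >>> longest_common_subsequence("ABCBDAB", "BDCAB")[0]
#     4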
| 442 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ : str = logging.get_logger(__name__)
class _a ( __lowerCAmelCase ):
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
super().__init__()
_snake_case = nn.ModuleList(_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = True ,) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,self.nets ) ):
_snake_case , _snake_case = controlnet(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,)
# merge samples
if i == 0:
_snake_case , _snake_case = down_samples, mid_sample
else:
_snake_case = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,) -> Union[str, Any]:
_snake_case = 0
_snake_case = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
_SCREAMING_SNAKE_CASE ,is_main_process=_SCREAMING_SNAKE_CASE ,save_function=_SCREAMING_SNAKE_CASE ,safe_serialization=_SCREAMING_SNAKE_CASE ,variant=_SCREAMING_SNAKE_CASE ,)
idx += 1
_snake_case = model_path_to_save + f"""_{idx}"""
@classmethod
def _lowercase ( cls ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[int]:
_snake_case = 0
_snake_case = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case = pretrained_model_path
while os.path.isdir(_SCREAMING_SNAKE_CASE ):
_snake_case = ControlNetModel.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
controlnets.append(_SCREAMING_SNAKE_CASE )
idx += 1
_snake_case = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(_SCREAMING_SNAKE_CASE )} controlnets loaded from {pretrained_model_path}.""" )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(_SCREAMING_SNAKE_CASE )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(_SCREAMING_SNAKE_CASE )
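# Hedged usage sketch: upstream (diffusers) this class is MultiControlNetModel, and the
# checkpoint names below are illustrative examples, not pinned by this file:
#     from diffusers import ControlNetModel
#     canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#     multi = MultiControlNetModel([canny, pose])  # forward() sums residuals across nets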
| 185 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_roformer_fast'''] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_roformer'''] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_roformer'''] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_roformer'''] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
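# The lazy pattern above defers submodule imports until first attribute access; a sketch
# of the observable behaviour (checkpoint name is an example from the Hub):
#     from transformers import RoFormerModel  # resolved through _LazyModule
#     model = RoFormerModel.from_pretrained("junnyu/roformer_chinese_base")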
| 185 | 1 |
def is_palindrome( num: int ) -> bool:
    return str(num ) == str(num )[::-1]
def sum_reverse( num: int ) -> int:
    return int(num ) + int(str(num )[::-1] )
def solution( limit: int = 10000 ) -> int:
    lychrel_nums = []
    for num in range(1 , limit ):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a )
            iterations += 1
            if is_palindrome(a ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 260 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['''pixel_values''']
def __init__( self :str , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Dict[str, int]] = None , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[int, float] = 1 / 255 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ :Tuple , ) -> None:
super().__init__(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = size if size is not None else {'''shortest_edge''': 256}
__SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = do_resize
__SCREAMING_SNAKE_CASE : str = size
__SCREAMING_SNAKE_CASE : Any = resample
__SCREAMING_SNAKE_CASE : Union[str, Any] = do_center_crop
__SCREAMING_SNAKE_CASE : Tuple = crop_size
__SCREAMING_SNAKE_CASE : List[str] = do_rescale
__SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
__SCREAMING_SNAKE_CASE : Any = do_normalize
__SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__( self :Dict , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :int , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE : Tuple = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :str , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :Any , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE : str = get_size_dict(lowerCAmelCase__ )
return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :float , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :str ) -> np.ndarray:
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :Dict , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :List[Any] , ) -> np.ndarray:
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :ImageInput , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :PILImageResampling = None , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[float] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE : Tuple = size if size is not None else self.size
__SCREAMING_SNAKE_CASE : Dict = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__SCREAMING_SNAKE_CASE : Optional[Any] = crop_size if crop_size is not None else self.crop_size
__SCREAMING_SNAKE_CASE : str = get_size_dict(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE : Tuple = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE : int = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE : int = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE : Tuple = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
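# Hedged usage sketch; upstream this is a standard image processor whose public entry
# point is preprocess (the class name here is obfuscated, so the call is illustrative):
#     import numpy as np
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop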
| 260 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
a_ = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
a_ = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def get_pairs( word ):
    """
    Return the set of adjacent symbol pairs in a word, represented as a tuple of symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , **__UpperCAmelCase , )-> str:
'''simple docstring'''
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = merges_file
lowerCAmelCase__ = {}
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 3
self.add_from_file(__UpperCAmelCase )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split("\n" )[:-1]
lowerCAmelCase__ = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = {}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , __UpperCAmelCase )-> List[str]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = tuple(__UpperCAmelCase )
lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
lowerCAmelCase__ = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
lowerCAmelCase__ = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(__UpperCAmelCase ):
try:
lowerCAmelCase__ = word.index(__UpperCAmelCase , __UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(__UpperCAmelCase )
lowerCAmelCase__ = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(__UpperCAmelCase )
lowerCAmelCase__ = "@@ ".join(__UpperCAmelCase )
lowerCAmelCase__ = word[:-4]
lowerCAmelCase__ = word
return word
def UpperCAmelCase ( self , __UpperCAmelCase )-> Any:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = re.findall(R"\S+\n?" , __UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(" " ) ) )
return split_tokens
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase , self.unk_token )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = " ".join(__UpperCAmelCase ).replace("@@ " , "" ).strip()
return out_string
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.merges_file , __UpperCAmelCase )
return out_vocab_file, out_merge_file
def UpperCAmelCase ( self , __UpperCAmelCase )-> Any:
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
try:
with open(__UpperCAmelCase , "r" , encoding="utf-8" ) as fd:
self.add_from_file(__UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
lowerCAmelCase__ = f.readlines()
for lineTmp in lines:
lowerCAmelCase__ = lineTmp.strip()
lowerCAmelCase__ = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
lowerCAmelCase__ = line[:idx]
lowerCAmelCase__ = len(self.encoder )
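# Sketch of the BPE machinery above: get_pairs enumerates adjacent symbol pairs, and the
# merge loop repeatedly fuses the pair with the lowest rank in self.bpe_ranks, e.g.
#     get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}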
| 339 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""convbert"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=768 , __UpperCAmelCase=2 , __UpperCAmelCase=9 , __UpperCAmelCase=1 , __UpperCAmelCase=None , **__UpperCAmelCase , )-> Any:
'''simple docstring'''
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = embedding_size
lowerCAmelCase__ = head_ratio
lowerCAmelCase__ = conv_kernel_size
lowerCAmelCase__ = num_groups
lowerCAmelCase__ = classifier_dropout
class lowercase__ ( _UpperCAmelCase ):
@property
def UpperCAmelCase ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
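# Minimal instantiation sketch; upstream the class above is ConvBertConfig:
#     cfg = ConvBertConfig(vocab_size=30522, head_ratio=2, conv_kernel_size=9)
#     assert cfg.model_type == "convbert"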
| 339 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : int = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a = 'swin'
__a = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , SCREAMING_SNAKE_CASE__=224 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=96 , SCREAMING_SNAKE_CASE__=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1e-5 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
super().__init__(**snake_case__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(snake_case__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
lowerCamelCase__ = ["stem"] + [f'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
lowerCamelCase__ = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
class _A ( SCREAMING_SNAKE_CASE__ ):
__a = version.parse('1.11' )
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowerCamelCase ( self ) -> int:
return 1e-4
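# The derived channel width set in the config above follows embed_dim * 2**(len(depths) - 1);
# e.g. the tiny config (embed_dim=96, depths=[2, 2, 6, 2]) ends with 96 * 2**3 = 768 channels.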
| 719 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger(__name__)
class _A ( __a ):
__a = 'encoder-decoder'
__a = True
def __init__( self , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowerCamelCase__ = kwargs.pop("encoder" )
lowerCamelCase__ = encoder_config.pop("model_type" )
lowerCamelCase__ = kwargs.pop("decoder" )
lowerCamelCase__ = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
lowerCamelCase__ = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = True
@classmethod
def _lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> PretrainedConfig:
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> int:
output = copy.deepcopy(self.__dict__ )
output["encoder"] = self.encoder.to_dict()
output["decoder"] = self.decoder.to_dict()
output["model_type"] = self.__class__.model_type
return output
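# Hedged usage sketch with concrete upstream config classes (a BERT encoder paired with
# a BERT decoder); from_encoder_decoder_configs is the classmethod defined above:
#     from transformers import BertConfig, EncoderDecoderConfig
#     enc = BertConfig()
#     dec = BertConfig()
#     cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#     assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention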
| 274 | 0 |
'''simple docstring'''
def find_min( arr: list ) -> int:
    """Return the minimum difference between the sums of a two-subset partition of arr."""
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
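# Worked example for the subset-sum DP above: arr = [1, 6, 11, 5] has total 23; the best
# split is {1, 5, 6} (sum 12) against {11} (sum 11), giving a minimum difference of 1.
print(find_min([1, 6, 11, 5]))  # 1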
| 347 |
'''simple docstring'''
import math
def is_prime( number: int ) -> bool:
    """Checks whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( nth: int = 10001 ) -> int:
    """Returns the nth prime number."""
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 347 | 1 |
'''simple docstring'''
from math import factorial, radians
def sin( angle_in_degrees: float , accuracy: int = 18 , rounded_values_count: int = 10 ) -> float:
    # Reduce the angle into the range (-360, 360) degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
    __import__("doctest").testmod()
| 542 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A = "x" , __A = 10**-10 , __A = 1 , ) -> complex:
_snake_case = symbols(__A )
_snake_case = lambdify(__A , __A )
_snake_case = lambdify(__A , diff(__A , __A ) )
_snake_case = starting_point
while True:
if diff_function(__A ) != 0:
_snake_case = prev_guess - multiplicity * func(__A ) / diff_function(
__A )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_snake_case = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 542 | 1 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
_lowerCAmelCase : Any = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
_lowerCAmelCase : int = logging.WARNING
def lowerCamelCase_( ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : str = os.getenv("DATASETS_VERBOSITY" , _lowerCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def lowerCamelCase_( ) -> str:
'''simple docstring'''
return __name__.split("." )[0]
def lowerCamelCase_( ) -> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : str = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Dict = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowerCamelCase_( _lowerCamelCase = None ) -> logging.Logger:
'''simple docstring'''
if name is None:
_lowerCamelCase : List[Any] = _get_library_name()
return logging.getLogger(_lowerCAmelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
_get_library_root_logger().setLevel(_lowerCAmelCase )
def lowerCamelCase_( ) -> List[str]:
'''simple docstring'''
return set_verbosity(_lowerCAmelCase )
def lowerCamelCase_( ) -> Optional[Any]:
'''simple docstring'''
return set_verbosity(_lowerCAmelCase )
def lowerCamelCase_( ) -> Union[str, Any]:
'''simple docstring'''
return set_verbosity(_lowerCAmelCase )
def lowerCamelCase_( ) -> Optional[Any]:
'''simple docstring'''
return set_verbosity(_lowerCAmelCase )
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[int] = False
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Tuple = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class A_ :
def __init__( self: Dict ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: List[str] ): # pylint: disable=unused-argument
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = args[0] if args else None
def __iter__( self: int ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self: Optional[int] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
def empty_fn(*__lowerCAmelCase: Any ,**__lowerCAmelCase: Any ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self: List[Any] ):
'''simple docstring'''
return self
def __exit__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
return
_lowerCAmelCase : Optional[Any] = True
class A_ :
def __call__( self: Union[str, Any] ,*__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int=False ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*A_ ,**A_ )
else:
return EmptyTqdm(*A_ ,**A_ )
def _lowercase ( self: Optional[Any] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : str = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*A_ ,**A_ )
def _lowercase ( self: List[str] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowerCAmelCase : List[Any] = _tqdm_cls()
def lowerCamelCase_( ) -> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def lowerCamelCase_( ) -> Dict:
'''simple docstring'''
global _tqdm_active
_lowerCamelCase : Optional[Any] = True
def lowerCamelCase_( ) -> Any:
'''simple docstring'''
global _tqdm_active
_lowerCamelCase : Optional[Any] = False | 46 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class A__ ( __snake_case ):
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , A_ , )
super().__init__(*A_ , **A_ )
| 629 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'vocab_file': 'spiece.model'}
lowercase_ = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowercase_ = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
lowercase_ = '▁'
class _snake_case ( a__):
UpperCamelCase__ : Optional[int] =VOCAB_FILES_NAMES
UpperCamelCase__ : List[str] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Optional[Any] =["""input_ids""", """attention_mask"""]
def __init__( self : List[str], __lowercase : int, __lowercase : Any="</s>", __lowercase : Tuple="<unk>", __lowercase : Optional[int]="<pad>", __lowercase : Optional[Any]=100, __lowercase : Union[str, Any]=None, __lowercase : List[Any] = None, __lowercase : Any=True, **__lowercase : Dict, ):
if extra_ids > 0 and additional_special_tokens is None:
lowercase__ = [F'''<extra_id_{i}>''' for i in range(lowercase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowercase__ = len(set(filter(lambda __lowercase : bool("extra_id" in str(lowercase__ ) ), lowercase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
lowercase__ = legacy
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase__, unk_token=lowercase__, pad_token=lowercase__, extra_ids=lowercase__, additional_special_tokens=lowercase__, sp_model_kwargs=self.sp_model_kwargs, legacy=lowercase__, **lowercase__, )
lowercase__ = vocab_file
lowercase__ = extra_ids
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
@staticmethod
def A__ ( __lowercase : int, __lowercase : str, __lowercase : Optional[int] ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowercase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value.", lowercase__, )
return max_model_length
@property
def A__ ( self : Tuple ):
return self.sp_model.get_piece_size() + self._extra_ids
def A__ ( self : List[Any] ):
lowercase__ = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ ( self : str, __lowercase : Tuple, __lowercase : Optional[int] = None, __lowercase : List[Any] = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__, token_ids_a=lowercase__, already_has_special_tokens=lowercase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase__ )) + [1]
return ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
def A__ ( self : Optional[Any] ):
return list(
set(filter(lambda __lowercase : bool(re.search(R"<extra_id_\d+>", lowercase__ ) ) is not None, self.additional_special_tokens ) ) )
def A__ ( self : List[str] ):
return [self._convert_token_to_id(lowercase__ ) for token in self.get_sentinel_tokens()]
def A__ ( self : Dict, __lowercase : str ):
if len(lowercase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def A__ ( self : Any, __lowercase : str, __lowercase : Union[str, Any] = None ):
lowercase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A__ ( self : Optional[Any], __lowercase : List[Any], __lowercase : str = None ):
lowercase__ = self._add_eos_if_not_present(lowercase__ )
if token_ids_a is None:
return token_ids_a
else:
lowercase__ = self._add_eos_if_not_present(lowercase__ )
return token_ids_a + token_ids_a
def __getstate__( self : str ):
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : int, __lowercase : List[str] ):
lowercase__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self : Tuple, __lowercase : Union[str, Any], **__lowercase : Optional[int] ):
if not self.legacy:
lowercase__ = SPIECE_UNDERLINE + text.replace(lowercase__, " " )
return super().tokenize(lowercase__, **lowercase__ )
def A__ ( self : Optional[Any], __lowercase : Tuple, **__lowercase : Dict ):
if not self.legacy:
lowercase__ = text.startswith(lowercase__ )
if is_first:
lowercase__ = text[1:]
lowercase__ = self.sp_model.encode(lowercase__, out_type=lowercase__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(lowercase__ ):
lowercase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def A__ ( self : Union[str, Any], __lowercase : str ):
if token.startswith("<extra_id_" ):
lowercase__ = re.match(R"<extra_id_(\d+)>", lowercase__ )
lowercase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase__ )
def A__ ( self : Optional[Any], __lowercase : Tuple ):
if index < self.sp_model.get_piece_size():
lowercase__ = self.sp_model.IdToPiece(lowercase__ )
else:
lowercase__ = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def A__ ( self : Dict, __lowercase : str ):
lowercase__ = []
lowercase__ = ''''''
lowercase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase__ ) + token
lowercase__ = True
lowercase__ = []
else:
current_sub_tokens.append(lowercase__ )
lowercase__ = False
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def A__ ( self : Optional[int], __lowercase : str, __lowercase : Union[str, Any] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
lowercase__, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__, "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 37 | 0 |
import csv
import tweepy
# Twitter API credentials
__lowerCamelCase : str = """"""
__lowerCamelCase : Optional[int] = """"""
__lowerCamelCase : Dict = """"""
__lowerCamelCase : Dict = """"""
def A__ ( _a : Optional[int] ):
'''simple docstring'''
snake_case__ : int =tweepy.OAuthHandler(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
auth.set_access_token(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : int =tweepy.API(__SCREAMING_SNAKE_CASE )
# initialize a list to hold all the tweepy Tweets
snake_case__ : List[Any] =[]
# make initial request for most recent tweets (200 is the maximum allowed count)
snake_case__ : Dict =api.user_timeline(screen_name=__SCREAMING_SNAKE_CASE , count=200 )
# save most recent tweets
alltweets.extend(__SCREAMING_SNAKE_CASE )
# save the id of the oldest tweet less one
snake_case__ : Optional[Any] =alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__SCREAMING_SNAKE_CASE ) > 0:
print(f"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
snake_case__ : Tuple =api.user_timeline(
screen_name=__SCREAMING_SNAKE_CASE , count=200 , max_id=__SCREAMING_SNAKE_CASE )
# save most recent tweets
alltweets.extend(__SCREAMING_SNAKE_CASE )
# update the id of the oldest tweet less one
snake_case__ : Optional[Any] =alltweets[-1].id - 1
print(f"...{len(__SCREAMING_SNAKE_CASE )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
snake_case__ : Union[str, Any] =[[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"new_{screen_name}_tweets.csv" , """w""" ) as f:
snake_case__ : Optional[int] =csv.writer(__SCREAMING_SNAKE_CASE )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 385 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__A = True
except ImportError:
__A = False
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class snake_case ( __snake_case ):
@staticmethod
def lowercase_ ( UpperCamelCase__ : ArgumentParser)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = parser.add_parser("add-new-model")
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode.")
add_new_model_parser.add_argument("--testing_file" , type=UpperCamelCase__ , help="Configuration file on which to run.")
add_new_model_parser.add_argument(
"--path" , type=UpperCamelCase__ , help="Path to cookiecutter. Should only be used for testing purposes.")
add_new_model_parser.set_defaults(func=UpperCamelCase__)
def __init__( self : Dict , UpperCamelCase__ : bool , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , *UpperCamelCase__ : Tuple)-> str:
'''simple docstring'''
__lowerCAmelCase: Dict = testing
__lowerCAmelCase: Any = testing_file
__lowerCAmelCase: str = path
def lowercase_ ( self : int)-> Optional[int]:
'''simple docstring'''
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead.")
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n")
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
__lowerCAmelCase: Union[str, Any] = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:2_2]]
if len(UpperCamelCase__) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory.")
__lowerCAmelCase: Any = (
Path(UpperCamelCase__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
)
__lowerCAmelCase: Any = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase__))
else:
with open(self._testing_file , "r") as configuration_file:
__lowerCAmelCase: Any = json.load(UpperCamelCase__)
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path) , no_input=UpperCamelCase__ , extra_context=UpperCamelCase__ , )
__lowerCAmelCase: Dict = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:2_2]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r") as configuration_file:
__lowerCAmelCase: Optional[int] = json.load(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = configuration["lowercase_modelname"]
__lowerCAmelCase: List[str] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f"{directory}/configuration.json")
__lowerCAmelCase: Optional[Any] = "PyTorch" in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase: str = "TensorFlow" in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase: str = "Flax" in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase: Any = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__)
os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=UpperCamelCase__)
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , "w"):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
def remove_copy_lines(UpperCamelCase__ : int):
with open(UpperCamelCase__ , "r") as f:
__lowerCAmelCase: int = f.readlines()
with open(UpperCamelCase__ , "w") as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase__)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : List[str]):
# Create temp file
__lowerCAmelCase , __lowerCAmelCase: List[str] = mkstemp()
__lowerCAmelCase: Dict = False
with fdopen(UpperCamelCase__ , "w") as new_file:
with open(UpperCamelCase__) as old_file:
for line in old_file:
new_file.write(UpperCamelCase__)
if line_to_copy_below in line:
__lowerCAmelCase: str = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase__)
if not line_found:
raise ValueError(f"Line {line_to_copy_below} was not found in file.")
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase__ , UpperCamelCase__)
# Remove original file
remove(UpperCamelCase__)
# Move new file
move(UpperCamelCase__ , UpperCamelCase__)
def skip_units(UpperCamelCase__ : Union[str, Any]):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase__ : List[str]):
with open(UpperCamelCase__) as datafile:
__lowerCAmelCase: Tuple = []
__lowerCAmelCase: str = False
__lowerCAmelCase: Optional[int] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
__lowerCAmelCase: List[Any] = line.split("\"")[1]
__lowerCAmelCase: Dict = skip_units(UpperCamelCase__)
elif "# Below: " in line and "##" not in line:
__lowerCAmelCase: List[Any] = line.split("\"")[1]
__lowerCAmelCase: Any = skip_units(UpperCamelCase__)
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: List[str] = []
elif "# Replace with" in line and "##" not in line:
__lowerCAmelCase: List[str] = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase__)
remove(UpperCamelCase__)
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
os.rmdir(UpperCamelCase__)
| 346 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase = 16
lowercase = 32
def __UpperCAmelCase ( a_ , a_ = 16 , a_ = "bert-base-cased"):
snake_case_ = AutoTokenizer.from_pretrained(a_)
snake_case_ = load_dataset('glue' , 'mrpc')
def tokenize_function(a_):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a_ , max_length=a_)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ = datasets.map(
a_ , batched=a_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=a_)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(a_):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a_ , padding='max_length' , max_length=1_28 , return_tensors='pt')
return tokenizer.pad(a_ , padding='longest' , return_tensors='pt')
# Instantiate dataloaders.
snake_case_ = DataLoader(
tokenized_datasets['train'] , shuffle=a_ , collate_fn=a_ , batch_size=a_)
snake_case_ = DataLoader(
tokenized_datasets['validation'] , shuffle=a_ , collate_fn=a_ , batch_size=a_)
return train_dataloader, eval_dataloader
def __UpperCAmelCase ( a_ , a_ , a_ , a_):
model.eval()
snake_case_ = 0
for step, batch in enumerate(a_):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
snake_case_ = model(**a_)
snake_case_ = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
snake_case_ , snake_case_ = accelerator.gather(
(predictions, batch['labels'])) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(a_) - 1:
snake_case_ = predictions[: len(eval_dataloader.dataset) - samples_seen]
snake_case_ = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=a_ , references=a_ , )
snake_case_ = metric.compute()
return eval_metric["accuracy"]
def __UpperCAmelCase ( a_ , a_):
# Initialize accelerator
snake_case_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config['lr']
snake_case_ = int(config['num_epochs'])
snake_case_ = int(config['seed'])
snake_case_ = int(config['batch_size'])
snake_case_ = args.model_name_or_path
set_seed(a_)
snake_case_ , snake_case_ = get_dataloaders(a_ , a_ , a_)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_)
# Instantiate optimizer
snake_case_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case_ = optimizer_cls(params=model.parameters() , lr=a_)
if accelerator.state.deepspeed_plugin is not None:
snake_case_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
snake_case_ = 1
snake_case_ = (len(a_) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case_ = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , )
else:
snake_case_ = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare(
a_ , a_ , a_ , a_ , a_)
# We need to keep track of how many total steps we have iterated over
snake_case_ = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case_ = 0
snake_case_ = evaluate.load('glue' , 'mrpc')
snake_case_ = num_epochs
if args.partial_train_epoch is not None:
snake_case_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint)
snake_case_ = args.resume_from_checkpoint.split('epoch_')[1]
snake_case_ = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case_ = int(a_) + 1
snake_case_ = evaluation_loop(a_ , a_ , a_ , a_)
accelerator.print('resumed checkpoint performance:' , a_)
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0])
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'])
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''') , 'r') as f:
snake_case_ = json.load(a_)
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case_ = {}
for epoch in range(a_ , a_):
model.train()
for step, batch in enumerate(a_):
snake_case_ = model(**a_)
snake_case_ = outputs.loss
snake_case_ = loss / gradient_accumulation_steps
accelerator.backward(a_)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case_ = f'''epoch_{epoch}'''
snake_case_ = os.path.join(args.output_dir , a_)
accelerator.save_state(a_)
snake_case_ = evaluation_loop(a_ , a_ , a_ , a_)
snake_case_ = accuracy
snake_case_ = lr_scheduler.get_lr()[0]
snake_case_ = optimizer.param_groups[0]['lr']
snake_case_ = epoch
snake_case_ = overall_step
accelerator.print(f'''epoch {epoch}:''' , a_)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''') , 'w') as f:
json.dump(a_ , a_)
def __UpperCAmelCase ( ):
snake_case_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
parser.add_argument(
'--model_name_or_path' , type=a_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=a_ , )
parser.add_argument(
'--output_dir' , type=a_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=a_ , default=a_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=a_ , default=a_ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=a_ , default=2 , help='Number of train epochs.' , )
snake_case_ = parser.parse_args()
snake_case_ = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(a_ , a_)
if __name__ == "__main__":
main()
| 607 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def __UpperCAmelCase ( a_ , a_=None):
require_version(deps[pkg] , a_)
| 607 | 1 |
'''simple docstring'''
class _lowercase :
def __init__( self : Any ) -> Union[str, Any]:
__snake_case = ''
__snake_case = ''
__snake_case = []
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
__snake_case = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
__snake_case = self.__min_dist_top_down_dp(SCREAMING_SNAKE_CASE_ , n - 1 )
__snake_case = self.__min_dist_top_down_dp(m - 1 , SCREAMING_SNAKE_CASE_ )
__snake_case = self.__min_dist_top_down_dp(m - 1 , n - 1 )
__snake_case = 1 + min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self.dp[m][n]
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> int:
__snake_case = worda
__snake_case = worda
__snake_case = [[-1 for _ in range(len(SCREAMING_SNAKE_CASE_ ) )] for _ in range(len(SCREAMING_SNAKE_CASE_ ) )]
return self.__min_dist_top_down_dp(len(SCREAMING_SNAKE_CASE_ ) - 1 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> int:
__snake_case = worda
__snake_case = worda
__snake_case = len(SCREAMING_SNAKE_CASE_ )
__snake_case = len(SCREAMING_SNAKE_CASE_ )
__snake_case = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
__snake_case = j
elif j == 0: # second string is empty
__snake_case = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
__snake_case = self.dp[i - 1][j - 1]
else:
__snake_case = self.dp[i][j - 1]
__snake_case = self.dp[i - 1][j]
__snake_case = self.dp[i - 1][j - 1]
__snake_case = 1 + min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self.dp[m][n]
if __name__ == "__main__":
_a : Optional[Any] = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
_a : Any = input("Enter the first string: ").strip()
_a : str = input("Enter the second string: ").strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 56 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _a (lowercase__ : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
__snake_case = {'+', '-', '*', '/'}
__snake_case = []
for token in postfix_notation:
if token in operations:
__snake_case , __snake_case = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowercase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Dict = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='informer'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : List[Any] , __a : Optional[int] = None , __a : Optional[int] = None , __a : str = "student_t" , __a : str = "nll" , __a : int = 1 , __a : List[int] = None , __a : Optional[Union[str, bool]] = "mean" , __a : int = 0 , __a : int = 0 , __a : int = 0 , __a : int = 0 , __a : Optional[List[int]] = None , __a : Optional[List[int]] = None , __a : int = 64 , __a : int = 32 , __a : int = 32 , __a : int = 2 , __a : int = 2 , __a : int = 2 , __a : int = 2 , __a : bool = True , __a : str = "gelu" , __a : float = 0.05 , __a : float = 0.1 , __a : float = 0.1 , __a : float = 0.1 , __a : float = 0.1 , __a : int = 1_00 , __a : float = 0.02 , __a : List[Any]=True , __a : str = "prob" , __a : int = 5 , __a : bool = True , **__a : Any , ):
# time series specific configuration
_a = prediction_length
_a = context_length or prediction_length
_a = distribution_output
_a = loss
_a = input_size
_a = num_time_features
_a = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_a = scaling
_a = num_dynamic_real_features
_a = num_static_real_features
_a = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_a = cardinality
else:
_a = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_a = embedding_dimension
else:
_a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_a = num_parallel_samples
# Transformer architecture configuration
_a = input_size * len(self.lags_sequence ) + self._number_of_features
_a = d_model
_a = encoder_attention_heads
_a = decoder_attention_heads
_a = encoder_ffn_dim
_a = decoder_ffn_dim
_a = encoder_layers
_a = decoder_layers
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = activation_function
_a = init_std
_a = use_cache
# Informer
_a = attention_type
_a = sampling_factor
_a = distil
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : List[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 709 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCAmelCase_ : Tuple = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
lowerCAmelCase_ : Union[str, Any] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
lowerCAmelCase_ : Tuple = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE (datasets.Metric ):
"""simple docstring"""
def UpperCamelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def UpperCamelCase__ ( self : Optional[int] , __a : List[Any] , __a : str , __a : int=None , __a : Dict=True , __a : Optional[int]=False ):
if rouge_types is None:
_a = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
_a = rouge_scorer.RougeScorer(rouge_types=__a , use_stemmer=__a )
if use_aggregator:
_a = scoring.BootstrapAggregator()
else:
_a = []
for ref, pred in zip(__a , __a ):
_a = scorer.score(__a , __a )
if use_aggregator:
aggregator.add_scores(__a )
else:
scores.append(__a )
if use_aggregator:
_a = aggregator.aggregate()
else:
_a = {}
for key in scores[0]:
_a = [score[key] for score in scores]
return result
| 521 | 0 |
'''simple docstring'''
from torch import nn
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase ) -> Dict:
'''simple docstring'''
super().__init__()
UpperCamelCase : Optional[int] = class_size
UpperCamelCase : Any = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
UpperCamelCase : str = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Dict = self.mlp(_SCREAMING_SNAKE_CASE )
return logits
| 173 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__a = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__a = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
UpperCAmelCase_ : Union[str, Any] = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) )
if bit_search is None:
raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] )
return bit_size // 8
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase_ : Tuple = BloomConfig()
else:
UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
for i, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :str = """unispeech"""
def __init__( self , __UpperCAmelCase=3_2 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=1_6 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=1_0 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1_0 , __UpperCAmelCase=0 , __UpperCAmelCase=3_2_0 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_0_0 , __UpperCAmelCase=2_5_6 , __UpperCAmelCase=2_5_6 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2_5_6 , __UpperCAmelCase=8_0 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = hidden_size
lowerCAmelCase__ :Tuple = feat_extract_norm
lowerCAmelCase__ :Optional[int] = feat_extract_activation
lowerCAmelCase__ :int = list(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = list(__UpperCAmelCase )
lowerCAmelCase__ :Any = list(__UpperCAmelCase )
lowerCAmelCase__ :Dict = conv_bias
lowerCAmelCase__ :Any = num_conv_pos_embeddings
lowerCAmelCase__ :Any = num_conv_pos_embedding_groups
lowerCAmelCase__ :int = len(self.conv_dim )
lowerCAmelCase__ :Optional[Any] = num_hidden_layers
lowerCAmelCase__ :List[str] = intermediate_size
lowerCAmelCase__ :Union[str, Any] = hidden_act
lowerCAmelCase__ :List[str] = num_attention_heads
lowerCAmelCase__ :Optional[int] = hidden_dropout
lowerCAmelCase__ :Any = attention_dropout
lowerCAmelCase__ :Union[str, Any] = activation_dropout
lowerCAmelCase__ :str = feat_proj_dropout
lowerCAmelCase__ :Tuple = final_dropout
lowerCAmelCase__ :str = layerdrop
lowerCAmelCase__ :Tuple = layer_norm_eps
lowerCAmelCase__ :Any = initializer_range
lowerCAmelCase__ :Any = num_ctc_classes
lowerCAmelCase__ :List[str] = vocab_size
lowerCAmelCase__ :Optional[Any] = do_stable_layer_norm
lowerCAmelCase__ :List[str] = use_weighted_layer_sum
lowerCAmelCase__ :Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ :List[str] = apply_spec_augment
lowerCAmelCase__ :int = mask_time_prob
lowerCAmelCase__ :Optional[Any] = mask_time_length
lowerCAmelCase__ :str = mask_time_min_masks
lowerCAmelCase__ :int = mask_feature_prob
lowerCAmelCase__ :Optional[int] = mask_feature_length
lowerCAmelCase__ :Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ :Any = num_codevectors_per_group
lowerCAmelCase__ :Dict = num_codevector_groups
lowerCAmelCase__ :int = contrastive_logits_temperature
lowerCAmelCase__ :Dict = feat_quantizer_dropout
lowerCAmelCase__ :Tuple = num_negatives
lowerCAmelCase__ :Tuple = codevector_dim
lowerCAmelCase__ :List[str] = proj_codevector_dim
lowerCAmelCase__ :Tuple = diversity_loss_weight
# ctc loss
lowerCAmelCase__ :Union[str, Any] = ctc_loss_reduction
lowerCAmelCase__ :List[str] = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ :List[Any] = replace_prob
@property
def snake_case ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 560 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :List[str] = """SpeechT5FeatureExtractor"""
__magic_name__ :List[Any] = """SpeechT5Tokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = kwargs.pop('audio' , __UpperCAmelCase )
lowerCAmelCase__ :int = kwargs.pop('text' , __UpperCAmelCase )
lowerCAmelCase__ :Any = kwargs.pop('text_target' , __UpperCAmelCase )
lowerCAmelCase__ :int = kwargs.pop('audio_target' , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = kwargs.pop('sampling_rate' , __UpperCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
lowerCAmelCase__ :List[str] = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
elif text is not None:
lowerCAmelCase__ :str = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
else:
lowerCAmelCase__ :Any = None
if audio_target is not None:
lowerCAmelCase__ :int = self.feature_extractor(audio_target=__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :int = targets['input_values']
elif text_target is not None:
lowerCAmelCase__ :Optional[int] = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Dict = targets['input_ids']
else:
lowerCAmelCase__ :Dict = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase__ :Union[str, Any] = labels
lowerCAmelCase__ :Dict = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase__ :Dict = decoder_attention_mask
return inputs
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = kwargs.pop('input_values' , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = kwargs.pop('input_ids' , __UpperCAmelCase )
lowerCAmelCase__ :Any = kwargs.pop('labels' , __UpperCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
lowerCAmelCase__ :Union[str, Any] = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
elif input_ids is not None:
lowerCAmelCase__ :Optional[int] = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase )
else:
lowerCAmelCase__ :int = None
if labels is not None:
if "input_ids" in labels or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and "input_ids" in labels[0]):
lowerCAmelCase__ :List[str] = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = targets['input_ids']
else:
lowerCAmelCase__ :Optional[int] = self.feature_extractor.feature_size
lowerCAmelCase__ :int = self.feature_extractor.num_mel_bins
lowerCAmelCase__ :Dict = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = feature_size_hack
lowerCAmelCase__ :str = targets['input_values']
else:
lowerCAmelCase__ :Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase__ :Union[str, Any] = labels
lowerCAmelCase__ :List[Any] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase__ :Tuple = decoder_attention_mask
return inputs
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
| 560 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowerCamelCase (__lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 489 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCamelCase__ ( unittest.TestCase ):
def __init__( self : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any]=1_3 , lowerCamelCase : str=7 , lowerCamelCase : Tuple=True , lowerCamelCase : Dict=True , lowerCamelCase : Tuple=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Union[str, Any]=9_9 , lowerCamelCase : str=3_2 , lowerCamelCase : List[Any]=5 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : int=3_7 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Any=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Optional[Any]=5_1_2 , lowerCamelCase : Dict=1_6 , lowerCamelCase : List[str]=2 , lowerCamelCase : Union[str, Any]=0.02 , lowerCamelCase : List[str]=4 , ):
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_attention_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_choices
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_attention_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __a ( self : Tuple ):
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ = config_and_inputs
a__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __lowerCAmelCase ,unittest.TestCase ):
lowerCAmelCase__ : str = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __a ( self : int ):
'''simple docstring'''
a__ = FlaxAlbertModelTester(self )
@slow
def __a ( self : int ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a__ = model_class_name.from_pretrained("albert-base-v2" )
a__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Any ):
'''simple docstring'''
a__ = FlaxAlbertModel.from_pretrained("albert-base-v2" )
a__ = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
a__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
a__ = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
a__ = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase )
a__ = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1e-4 ) )
| 489 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
A__ : Union[str, Any] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
A__ : str = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
A__ : str = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def A_ ( self : str ) -> Any:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def A_ ( self : Tuple , __a : Optional[Any] , __a : Optional[int] , __a : Tuple = False , __a : Optional[int] = False , __a : Optional[Any] = False , __a : int = False , ) -> Any:
'''simple docstring'''
__snake_case : Dict = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
__snake_case : Dict = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
__snake_case : List[str] = TER(
normalized=_UpperCAmelCase , no_punct=_UpperCAmelCase , asian_support=_UpperCAmelCase , case_sensitive=_UpperCAmelCase , )
__snake_case : Dict = sb_ter.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 713 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
A__ : Optional[int] = False
class snake_case__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : List[str] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__snake_case : str = torch.manual_seed(0 )
__snake_case : int = pipe.dual_guided(
prompt='first prompt' , image=__a , text_to_image_strength=0.7_5 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
__snake_case : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained(__a , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : List[str] = generator.manual_seed(0 )
__snake_case : Tuple = pipe.dual_guided(
prompt='first prompt' , image=__a , text_to_image_strength=0.7_5 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : str = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[Any] = 'cyberpunk 2077'
__snake_case : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__snake_case : List[str] = torch.manual_seed(0 )
__snake_case : Tuple = pipe.dual_guided(
prompt=__a , image=__a , text_to_image_strength=0.7_5 , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__snake_case : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case : List[str] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__snake_case : Tuple = 'A painting of a squirrel eating a burger '
__snake_case : str = torch.manual_seed(0 )
__snake_case : Optional[Any] = pipe.text_to_image(
prompt=__a , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
__snake_case : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case : Optional[int] = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__snake_case : int = pipe.image_variation(__a , generator=__a , output_type='numpy' ).images
__snake_case : Any = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case : str = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 124 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = generator.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 82 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : List[Any] = (DDIMParallelScheduler,)
__lowercase : Optional[int] = (('''eta''', 0.0), ('''num_inference_steps''', 5_0))
def snake_case__ ( self , **__lowercase ):
"""simple docstring"""
__A : Dict = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**__lowercase )
return config
def snake_case__ ( self , **__lowercase ):
"""simple docstring"""
__A : Union[str, Any] = self.scheduler_classes[0]
__A : Optional[Any] = self.get_scheduler_config(**__lowercase )
__A : int = scheduler_class(**__lowercase )
__A ,__A : List[str] = 10, 0.0
__A : Tuple = self.dummy_model()
__A : Any = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for t in scheduler.timesteps:
__A : Dict = model(__lowercase , __lowercase )
__A : List[Any] = scheduler.step(__lowercase , __lowercase , __lowercase , __lowercase ).prev_sample
return sample
def snake_case__ ( self ):
"""simple docstring"""
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowercase )
__A : str = self.scheduler_classes[0]
__A : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
__A : List[str] = scheduler_class(**__lowercase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def snake_case__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__lowercase , beta_end=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=__lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowercase , prediction_type=__lowercase , sample_max_value=__lowercase , )
def snake_case__ ( self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=__lowercase , num_inference_steps=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowercase , eta=__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[int] = self.scheduler_classes[0]
__A : Tuple = self.get_scheduler_config()
__A : Optional[int] = scheduler_class(**__lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4_7_7_1 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2_4_6_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.0_2 ) ) < 1E-5
def snake_case__ ( self ):
"""simple docstring"""
__A : Dict = self.scheduler_classes[0]
__A : Tuple = self.get_scheduler_config()
__A : Optional[int] = scheduler_class(**__lowercase )
__A ,__A : Dict = 10, 0.0
scheduler.set_timesteps(__lowercase )
__A : int = self.dummy_model()
__A : int = self.dummy_sample_deter
__A : List[str] = self.dummy_sample_deter + 0.1
__A : Union[str, Any] = self.dummy_sample_deter - 0.1
__A : Union[str, Any] = samplea.shape[0]
__A : Any = torch.stack([samplea, samplea, samplea] , dim=0 )
__A : Dict = torch.arange(__lowercase )[0:3, None].repeat(1 , __lowercase )
__A : List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__A : str = scheduler.batch_step_no_noise(__lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __lowercase )
__A : Tuple = torch.sum(torch.abs(__lowercase ) )
__A : List[Any] = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1E-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1E-3
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[int] = self.full_loop()
__A : List[str] = torch.sum(torch.abs(__lowercase ) )
__A : int = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1E-3
def snake_case__ ( self ):
"""simple docstring"""
__A : Any = self.full_loop(prediction_type='v_prediction' )
__A : List[str] = torch.sum(torch.abs(__lowercase ) )
__A : Union[str, Any] = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1E-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1E-3
def snake_case__ ( self ):
"""simple docstring"""
__A : List[Any] = self.full_loop(set_alpha_to_one=__lowercase , beta_start=0.0_1 )
__A : Dict = torch.sum(torch.abs(__lowercase ) )
__A : str = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1E-3
def snake_case__ ( self ):
"""simple docstring"""
__A : Dict = self.full_loop(set_alpha_to_one=__lowercase , beta_start=0.0_1 )
__A : Any = torch.sum(torch.abs(__lowercase ) )
__A : Optional[Any] = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_4_1 ) < 1E-3
| 540 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _lowerCamelCase :
'''simple docstring'''
def __init__( self , __lowercase = None ):
"""simple docstring"""
if components is None:
__A : Optional[int] = []
__A : Dict = list(__lowercase )
def __len__( self ):
"""simple docstring"""
return len(self.__components )
def __str__( self ):
"""simple docstring"""
return "(" + ",".join(map(__lowercase , self.__components ) ) + ")"
def __add__( self , __lowercase ):
"""simple docstring"""
__A : Union[str, Any] = len(self )
if size == len(__lowercase ):
__A : Dict = [self.__components[i] + other.component(__lowercase ) for i in range(__lowercase )]
return Vector(__lowercase )
else:
raise Exception('must have the same size' )
def __sub__( self , __lowercase ):
"""simple docstring"""
__A : int = len(self )
if size == len(__lowercase ):
__A : str = [self.__components[i] - other.component(__lowercase ) for i in range(__lowercase )]
return Vector(__lowercase )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , __lowercase ):
"""simple docstring"""
...
@overload
def __mul__( self , __lowercase ):
"""simple docstring"""
...
def __mul__( self , __lowercase ):
"""simple docstring"""
if isinstance(__lowercase , (float, int) ):
__A : Dict = [c * other for c in self.__components]
return Vector(__lowercase )
elif isinstance(__lowercase , __lowercase ) and len(self ) == len(__lowercase ):
__A : Optional[Any] = len(self )
__A : Optional[int] = [self.__components[i] * other.component(__lowercase ) for i in range(__lowercase )]
return sum(__lowercase )
else: # error case
raise Exception('invalid operand!' )
def snake_case__ ( self ):
"""simple docstring"""
return Vector(self.__components )
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
if isinstance(__lowercase , __lowercase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
__A : str = value
def snake_case__ ( self ):
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__A : List[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(__lowercase ) )
def snake_case__ ( self , __lowercase , __lowercase = False ):
"""simple docstring"""
__A : List[Any] = self * other
__A : Any = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _lowercase ( UpperCamelCase__ : int ):
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
return Vector([0] * dimension )
def _lowercase ( UpperCamelCase__ : int, UpperCamelCase__ : int ):
assert isinstance(UpperCamelCase__, UpperCamelCase__ ) and (isinstance(UpperCamelCase__, UpperCamelCase__ ))
__A : List[str] = [0] * dimension
__A : Tuple = 1
return Vector(UpperCamelCase__ )
def _lowercase ( UpperCamelCase__ : float, UpperCamelCase__ : Vector, UpperCamelCase__ : Vector ):
assert (
isinstance(UpperCamelCase__, UpperCamelCase__ )
and isinstance(UpperCamelCase__, UpperCamelCase__ )
and (isinstance(UpperCamelCase__, (int, float) ))
)
return x * scalar + y
def _lowercase ( UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ):
random.seed(UpperCamelCase__ )
__A : Any = [random.randint(UpperCamelCase__, UpperCamelCase__ ) for _ in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
class _lowerCamelCase :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : str = matrix
__A : str = w
__A : Dict = h
def __str__( self ):
"""simple docstring"""
__A : Tuple = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , __lowercase ):
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__A : Union[str, Any] = []
for i in range(self.__height ):
__A : List[Any] = [
self.__matrix[i][j] + other.component(__lowercase , __lowercase )
for j in range(self.__width )
]
matrix.append(__lowercase )
return Matrix(__lowercase , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , __lowercase ):
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__A : Dict = []
for i in range(self.__height ):
__A : str = [
self.__matrix[i][j] - other.component(__lowercase , __lowercase )
for j in range(self.__width )
]
matrix.append(__lowercase )
return Matrix(__lowercase , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , __lowercase ):
"""simple docstring"""
...
@overload
def __mul__( self , __lowercase ):
"""simple docstring"""
...
def __mul__( self , __lowercase ):
"""simple docstring"""
if isinstance(__lowercase , __lowercase ): # matrix-vector
if len(__lowercase ) == self.__width:
__A : Tuple = zero_vector(self.__height )
for i in range(self.__height ):
__A : Any = [
self.__matrix[i][j] * other.component(__lowercase )
for j in range(self.__width )
]
ans.change_component(__lowercase , sum(__lowercase ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(__lowercase , (int, float) ): # matrix-scalar
__A : Optional[Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__lowercase , self.__width , self.__height )
return None
def snake_case__ ( self ):
"""simple docstring"""
return self.__height
def snake_case__ ( self ):
"""simple docstring"""
return self.__width
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
__A : Dict = value
else:
raise Exception('change_component: indices out of bounds' )
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__A : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__lowercase ) ):
__A : Dict = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__lowercase , self.__width - 1 , self.__height - 1 ).determinant()
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__lowercase , __lowercase )
else:
raise Exception('Indices out of bounds' )
def snake_case__ ( self ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__A : List[str] = [
self.__matrix[0][y] * self.cofactor(0 , __lowercase ) for y in range(self.__width )
]
return sum(__lowercase )
def _lowercase ( UpperCamelCase__ : int ):
__A : list[list[float]] = [[0] * n for _ in range(UpperCamelCase__ )]
return Matrix(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
def _lowercase ( UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ):
random.seed(UpperCamelCase__ )
__A : list[list[float]] = [
[random.randint(UpperCamelCase__, UpperCamelCase__ ) for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )
]
return Matrix(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
| 540 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case__ : Dict = 1_2_8_0_2_2
snake_case__ : Tuple = 1_2_8_0_2_8
@require_sentencepiece
class _a ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case =MaMaaaTokenizer
snake_case =False
snake_case =False
snake_case =True
def SCREAMING_SNAKE_CASE ( self ):
super().setUp()
_UpperCAmelCase =["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_UpperCAmelCase =dict(zip(_snake_case , range(len(_snake_case ) ) ) )
_UpperCAmelCase =Path(self.tmpdirname )
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_UpperCAmelCase =MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self , **_snake_case ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="</s>"
_UpperCAmelCase =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_tokenizer()
_UpperCAmelCase =list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(_snake_case ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_tokenizer()
_UpperCAmelCase =tokenizer.tokenize("This is a test" )
self.assertListEqual(_snake_case , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [2, 3, 4, 5, 6] , )
_UpperCAmelCase =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_snake_case , ["▁This", "▁is", "▁a", "▁t", "est"] )
_UpperCAmelCase =tokenizer.convert_tokens_to_string(_snake_case )
self.assertEqual(_snake_case , "This is a test" )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# fmt: off
_UpperCAmelCase ={"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case ="""facebook/m2m100_418M"""
snake_case =[
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
snake_case =[
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
snake_case =[EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def SCREAMING_SNAKE_CASE ( cls ):
_UpperCAmelCase =MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_UpperCAmelCase =1
return cls
def SCREAMING_SNAKE_CASE ( self ):
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_8063 )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , _snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="en"
_UpperCAmelCase =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
def SCREAMING_SNAKE_CASE ( self ):
self.assertIn(_snake_case , self.tokenizer.all_special_ids )
# fmt: off
_UpperCAmelCase =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
_UpperCAmelCase =self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCAmelCase =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
self.assertNotIn(self.tokenizer.eos_token , _snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =tempfile.mkdtemp()
_UpperCAmelCase =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case )
_UpperCAmelCase =MaMaaaTokenizer.from_pretrained(_snake_case )
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case )
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="en"
_UpperCAmelCase ="fr"
_UpperCAmelCase =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors="pt" )
_UpperCAmelCase =shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_UpperCAmelCase =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_UpperCAmelCase ="zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_UpperCAmelCase ="zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(_snake_case ) , {
# en_XX, A, test, EOS
"input_ids": [[12_8022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 12_8006,
} , )
| 408 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : int = get_tests_dir('fixtures/test_sentencepiece.model')
snake_case__ : Dict = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
snake_case__ : str = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class _a ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case =CamembertTokenizer
snake_case =CamembertTokenizerFast
snake_case =True
snake_case =True
def SCREAMING_SNAKE_CASE ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase =CamembertTokenizer(_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="<pad>"
_UpperCAmelCase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # <unk> tokens are not the same for the `rust` tokenizer as for the `slow` one,
        # because spm returns the raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# fmt: off
_UpperCAmelCase ={"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_UpperCAmelCase =[
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=_snake_case , )
| 408 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptUtilsTest ( TestCase ):
    def _no_encoding_on_file_open(self , file_path: str ):
        """Return a regex match if the given file calls ``open()`` without an explicit encoding."""
        with open(file_path , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements(self , file_path: str ):
        """Return the first genuine ``print(...)`` call in the file, ignoring comments and docstrings."""
        with open(file_path , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self ):
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def test_no_print_statements(self ):
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
 | 596 |
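The print-detection pattern above is easiest to understand by running it on a tiny sample (the sample text is made up):

import re

pattern = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL)
sample = 'x = 1  # print(x) stays ignored\nprint(x)\n'
real_calls = [m for m in pattern.finditer(sample) if m.group(1) is not None]
print(len(real_calls))  # 1 -> only the uncommented call is captured by group 1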
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ) -> Optional[Any]:
'''simple docstring'''
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with FSDP.state_dict_type(
__magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase : Any = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase : Optional[Any] = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
lowercase : Union[str, Any] = os.path.join(__magic_name__ , __magic_name__ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(__magic_name__ , __magic_name__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase : List[str] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase : Dict = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(__magic_name__ , __magic_name__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase : Union[str, Any] = os.path.join(__magic_name__ , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
logger.info(F"""Saving model to {ckpt_dir}""" )
lowercase : Optional[Any] = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=__magic_name__ , storage_writer=dist_cp.FileSystemWriter(__magic_name__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ) -> Optional[Any]:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__magic_name__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
lowercase : List[str] = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
lowercase : Union[str, Any] = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Loading model from {input_model_file}""" )
lowercase : Union[str, Any] = torch.load(__magic_name__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase : Tuple = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase : Any = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Loading model from {input_model_file}""" )
lowercase : Union[str, Any] = torch.load(__magic_name__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase : Optional[int] = (
os.path.join(__magic_name__ , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
lowercase : Optional[int] = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__magic_name__ , storage_reader=dist_cp.FileSystemReader(__magic_name__ ) , planner=DefaultLoadPlanner() , )
lowercase : Dict = state_dict['''model''']
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(__magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ) -> int:
'''simple docstring'''
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with FSDP.state_dict_type(
__magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase : Tuple = FSDP.optim_state_dict(__magic_name__ , __magic_name__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase : List[Any] = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase : Optional[int] = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(__magic_name__ , __magic_name__ )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
lowercase : Tuple = os.path.join(__magic_name__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(__magic_name__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0 ) -> str:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__magic_name__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase : Tuple = None
            # The check below should work but currently doesn't (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase : int = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase : Dict = os.path.join(__magic_name__ , __magic_name__ )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
lowercase : Optional[int] = torch.load(__magic_name__ )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
lowercase : str = (
os.path.join(__magic_name__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
lowercase : int = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(__magic_name__ ) , )
lowercase : Optional[int] = optim_state['''optimizer''']
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
lowercase : int = FSDP.optim_state_dict_to_load(__magic_name__ , __magic_name__ , __magic_name__ )
    optimizer.load_state_dict(__magic_name__ )
 | 596 | 1 |
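For context, all three branches above run inside FSDP's `state_dict_type` context manager. A minimal standalone sketch of the `FULL_STATE_DICT` policy (plain PyTorch, outside Accelerate):

from torch.distributed.fsdp import FullStateDictConfig, StateDictType
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

def gather_full_state_dict(model):
    # Gather sharded parameters into one state dict on rank 0, offloading to CPU
    # so the gather does not spike GPU memory.
    cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg):
        return model.state_dict()  # populated on rank 0, empty on other ranks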
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args , take_from=None , standard_warn=True , stacklevel=2 ):
    '''Warn about (and optionally pop) deprecated arguments or attributes.'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                f' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = f'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 634 |
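A hypothetical call site for the `deprecate` helper above, pulling a renamed keyword argument out of `**kwargs` (the `resize`/`width` names are invented for illustration):

def resize(image, size=None, **kwargs):
    # If a caller still passes the old `width` kwarg, warn and fall back to it.
    width = deprecate("width", "0.30.0", "Use `size` instead.", take_from=kwargs)
    if width is not None:
        size = width
    return image, size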
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path , tgt_path , save_path=None , **calc_rouge_kwargs ):
    '''Kwargs will be passed to calculate_rouge.'''
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **calc_rouge_kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 634 | 1 |
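Since the module hands `calculate_rouge_path` to `fire.Fire`, its parameters map directly onto the command line; a hypothetical invocation (file names are placeholders):

# python rouge_cli.py predictions.txt references.txt --save_path metrics.json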
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_UpperCamelCase : Optional[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder ( nn.Module ):
    def __init__( self , args ):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self , x ):
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out # BxNx2048
class JsonlDataset ( Dataset ):
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        self.data = [json.loads(line ) for line in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
    def __len__( self ):
        return len(self.data )
    def __getitem__( self , index ):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]['label']]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
        image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies( self ):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'] )
        return label_freqs
def collate_fn( batch ):
    """Pad text to the longest sentence in the batch and stack the image/label tensors."""
    lens = [len(row['sentence'] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row['image'] for row in batch] )
    tgt_tensor = torch.stack([row['label'] for row in batch] )
    img_start_token = torch.stack([row['image_start_token'] for row in batch] )
    img_end_token = torch.stack([row['image_end_token'] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 514 |
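A hypothetical wiring of the pieces above into a training loader (the checkpoint name and file path are placeholders):

from torch.utils.data import DataLoader
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
labels = get_mmimdb_labels()
dataset = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=509)
loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)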
'''simple docstring'''
def count_inversions_bf(arr ):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr ):
    """Count inversions by divide and conquer in O(n log n); returns (sorted_arr, count)."""
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p )
    b, inversions_q = count_inversions_recursive(q )
    c, cross_inversions = _count_cross_inversions(a , b )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p , q ):
    """Merge the two sorted lists, counting inversions that cross the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ' , num_inversions_bf )
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
if __name__ == "__main__":
    main()
| 514 | 1 |
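Since the brute-force counter is O(n^2) and the divide-and-conquer one is O(n log n), a randomized cross-check of the two is a cheap way to build confidence in the faster version:

import random

for _ in range(100):
    arr = [random.randint(0, 50) for _ in range(random.randint(0, 20))]
    _, fast_count = count_inversions_recursive(arr)
    assert fast_count == count_inversions_bf(arr)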
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester :
"""simple docstring"""
    def __init__( self , parent , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = TFFlaubertModel(config=config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = TFFlaubertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_flaubert_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest ( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_output_embeds_base_model( self ):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , ) # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 551 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self , **config ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case )
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__ = scheduler_class(**snake_case )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def snake_case__ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def snake_case__ ( self ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=snake_case , num_inference_steps=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=snake_case , eta=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case )
UpperCamelCase__, UpperCamelCase__ = 10, 0.0
scheduler.set_timesteps(snake_case )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = self.dummy_sample_deter + 0.1
UpperCamelCase__ = self.dummy_sample_deter - 0.1
UpperCamelCase__ = samplea.shape[0]
UpperCamelCase__ = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase__ = torch.arange(snake_case )[0:3, None].repeat(1 , snake_case )
UpperCamelCase__ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase__ = scheduler.batch_step_no_noise(snake_case , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.full_loop()
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.full_loop(prediction_type="v_prediction" )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.full_loop(set_alpha_to_one=snake_case , beta_start=0.01 )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.full_loop(set_alpha_to_one=snake_case , beta_start=0.01 )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 551 | 1 |
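The `steps_offset` behavior asserted above can be reproduced directly (assuming a `diffusers` version that ships `DDIMParallelScheduler`):

from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, steps_offset=1)
scheduler.set_timesteps(5)
print(scheduler.timesteps)  # tensor([801, 601, 401, 201, 1])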
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self , **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts(self , tokenizer):
        """simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
    def test_token_type_ids(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''')
            sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            encoding = tokenizer(sequences , padding=True)
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True) for seq in encoding['''input_ids''']]
            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data , expected_encoding)
            for expected, decoded in zip(expected_decoded_sequences , decoded_sequences):
                self.assertEqual(expected , decoded)
| 709 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric ( datasets.Metric ):
    '''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string'''),
'''references''': datasets.Value('''string'''),
}) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self , predictions , references):
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions , references):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
| 186 | 0 |
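The `Examples` section embedded in `_KWARGS_DESCRIPTION` above translates directly into runnable code:

import datasets

metric = datasets.load_metric("competition_math")
results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
print(results)  # {'accuracy': 1.0}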
import math
def is_prime(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime(value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
| 551 |
"""simple docstring"""
def nand_gate(input_1: int , input_2: int ) -> int:
    """Output 0 only if both inputs are 1; otherwise output 1."""
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 449 | 0 |
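Because NAND is functionally complete, the gate above suffices to build the other basic gates; a small illustration:

def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))

assert not_gate(1) == 0 and and_gate(1, 1) == 1 and and_gate(1, 0) == 0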
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname , version , pattern ):
    """Update the version in one file using a specific pattern."""
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def update_version_in_examples(version ):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update(version , patch=False ):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch=False ):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
 | 121 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls , format_type , aliases = None , ):
    """Register a formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error , format_type , aliases = None ):
    """Register an unavailable formatter's error under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
_jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a registered alias, return its canonical format type."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for the given format type (or one of its aliases)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None)}, but got '{format_type}'")
| 121 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 178 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""PoolFormerFeatureExtractor"""]
__snake_case = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 178 | 1 |
"""simple docstring"""
import random
from typing import Any
def lowercase_ ( __UpperCAmelCase ) -> list[Any]:
for _ in range(len(UpperCAmelCase__ ) ):
lowerCAmelCase__ : Any = random.randint(0 , len(UpperCAmelCase__ ) - 1 )
lowerCAmelCase__ : List[Any] = random.randint(0 , len(UpperCAmelCase__ ) - 1 )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = data[b], data[a]
return data
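

def true_fisher_yates_shuffle(data: list) -> list:
    # Reference textbook Fisher-Yates shuffle (uniform over permutations),
    # added only for comparison with the pair-swap variant above; the name
    # is an illustrative assumption, not part of the original module.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data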
if __name__ == "__main__":
_A = [0, 1, 2, 3, 4, 5, 6, 7]
_A = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 715 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 507 | 0 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 171 |
def naive_pattern_search(s: str, pattern: str) -> list[int]:
    """Return the start positions of every occurrence of pattern in s (O(n * m))."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 171 | 1 |
"""simple docstring"""
import operator as op
def a__ ( __lowercase ) -> str:
_A = []
_A = lambda __lowercase , __lowercase : int(x / y ) # noqa: E731 integer division operation
_A = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(__lowercase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__lowercase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
else:
_A = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
_A = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
stack.append(
str(opr[x](int(__lowercase ) , int(__lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " , )
return int(stack[0] )
if __name__ == "__main__":
a_ = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix)) | 721 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
a_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 0 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__magic_name__ = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__magic_name__ = "allenai"
def _lowerCamelCase ( UpperCAmelCase__ ) -> Tuple:
'''simple docstring'''
a__ = dict((re.sub(R'@@$','',UpperCAmelCase__ ), v) if k.endswith('@@' ) else (re.sub(R'$','</w>',UpperCAmelCase__ ), v) for k, v in d.items() )
a__ = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
a__ = d[k] # restore
return da
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
assert os.path.exists(UpperCAmelCase__ )
os.makedirs(UpperCAmelCase__,exist_ok=UpperCAmelCase__ )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
a__ = basename(UpperCAmelCase__ )
a__ = dirname(UpperCAmelCase__ )
a__ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
a__ = cls.hub_models()
a__ = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
a__ = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
a__ = hub_utils.from_pretrained(
UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,archive_map=UpperCAmelCase__,**UpperCAmelCase__ )
a__ = vars(chkpt['args']['model'] )
a__ = args['source_lang']
a__ = args['target_lang']
a__ = dirname(UpperCAmelCase__ )
a__ = basename(UpperCAmelCase__ )
# dicts
a__ = os.path.join(UpperCAmelCase__,f'''dict.{src_lang}.txt''' )
a__ = os.path.join(UpperCAmelCase__,f'''dict.{tgt_lang}.txt''' )
a__ = Dictionary.load(UpperCAmelCase__ )
a__ = rewrite_dict_keys(src_dict.indices )
a__ = len(UpperCAmelCase__ )
a__ = os.path.join(UpperCAmelCase__,'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(UpperCAmelCase__,'w',encoding='utf-8' ) as f:
f.write(json.dumps(UpperCAmelCase__,ensure_ascii=UpperCAmelCase__,indent=UpperCAmelCase__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
a__ = True
for k in src_vocab.keys():
if not k.islower():
a__ = False
break
a__ = Dictionary.load(UpperCAmelCase__ )
a__ = rewrite_dict_keys(tgt_dict.indices )
a__ = len(UpperCAmelCase__ )
a__ = os.path.join(UpperCAmelCase__,'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(UpperCAmelCase__,'w',encoding='utf-8' ) as f:
f.write(json.dumps(UpperCAmelCase__,ensure_ascii=UpperCAmelCase__,indent=UpperCAmelCase__ ) )
# merges_file (bpecodes)
a__ = os.path.join(UpperCAmelCase__,VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
a__ = os.path.join(UpperCAmelCase__,UpperCAmelCase__ )
if os.path.exists(UpperCAmelCase__ ):
break
with open(UpperCAmelCase__,encoding='utf-8' ) as fin:
a__ = fin.read()
a__ = re.sub(R' \d+$','',UpperCAmelCase__,0,re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(UpperCAmelCase__,'w',encoding='utf-8' ) as fout:
fout.write(UpperCAmelCase__ )
# model config
a__ = os.path.join(UpperCAmelCase__,'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
a__ = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
a__ = 5
a__ = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
a__ = best_score_hparams[model_dir]['length_penalty']
else:
a__ = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(UpperCAmelCase__,'w',encoding='utf-8' ) as f:
f.write(json.dumps(UpperCAmelCase__,ensure_ascii=UpperCAmelCase__,indent=UpperCAmelCase__ ) )
# tokenizer config
a__ = os.path.join(UpperCAmelCase__,UpperCAmelCase__ )
a__ = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(UpperCAmelCase__,'w',encoding='utf-8' ) as f:
f.write(json.dumps(UpperCAmelCase__,ensure_ascii=UpperCAmelCase__,indent=UpperCAmelCase__ ) )
# model
a__ = chkpt['models'][0]
a__ = model.state_dict()
# rename keys to start with 'model.'
a__ = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
a__ = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(UpperCAmelCase__,UpperCAmelCase__ )
a__ = FSMTConfig.from_pretrained(UpperCAmelCase__ )
a__ = FSMTForConditionalGeneration(UpperCAmelCase__ )
# check that it loads ok
model_new.load_state_dict(UpperCAmelCase__,strict=UpperCAmelCase__ )
# save
a__ = os.path.join(UpperCAmelCase__,UpperCAmelCase__ )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(UpperCAmelCase__,UpperCAmelCase__ )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 232 | """simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ = create_model(
'HTSAT-tiny','roberta',UpperCAmelCase__,precision='fp32',device='cuda:0' if torch.cuda.is_available() else 'cpu',enable_fusion=UpperCAmelCase__,fusion_type='aff_2d' if enable_fusion else None,)
return model, model_cfg
def _lowerCamelCase ( UpperCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
a__ = {}
a__ = R'.*sequential.(\d+).*'
a__ = R'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
a__ = key.replace(UpperCAmelCase__,UpperCAmelCase__ )
if re.match(UpperCAmelCase__,UpperCAmelCase__ ):
# replace sequential layers with list
a__ = re.match(UpperCAmelCase__,UpperCAmelCase__ ).group(1 )
a__ = key.replace(f'''sequential.{sequential_layer}.''',f'''layers.{int(UpperCAmelCase__ )//3}.linear.''' )
elif re.match(UpperCAmelCase__,UpperCAmelCase__ ):
a__ = int(re.match(UpperCAmelCase__,UpperCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
a__ = 1 if projecton_layer == 0 else 2
a__ = key.replace(f'''_projection.{projecton_layer}.''',f'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
a__ = value
a__ = mixed_qkv.size(0 ) // 3
a__ = mixed_qkv[:qkv_dim]
a__ = mixed_qkv[qkv_dim : qkv_dim * 2]
a__ = mixed_qkv[qkv_dim * 2 :]
a__ = query_layer
a__ = key_layer
a__ = value_layer
else:
a__ = value
return model_state_dict
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__=False ) -> Any:
'''simple docstring'''
a__ , a__ = init_clap(UpperCAmelCase__,enable_fusion=UpperCAmelCase__ )
clap_model.eval()
a__ = clap_model.state_dict()
a__ = rename_state_dict(UpperCAmelCase__ )
a__ = ClapConfig()
a__ = enable_fusion
a__ = ClapModel(UpperCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(UpperCAmelCase__,strict=UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
transformers_config.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 232 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline using any model trained for image classification."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 554 |
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 554 | 1 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Constructs a TVLT processor, wrapping a TVLT image processor and a TVLT feature extractor into one."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
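

# Hedged usage sketch; the checkpoint name assumes the public TVLT release on
# the Hub and the inputs are random stand-ins for real video frames and audio:
#
#     import numpy as np
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     video = list(np.random.rand(8, 224, 224, 3))   # 8 RGB frames
#     audio = list(np.random.rand(10_000))           # mono waveform
#     inputs = processor(images=video, audio=audio, sampling_rate=44_100)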
| 239 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_canny(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
params["controlnet"] = controlnet_params
__A : Optional[int] = 'bird'
__A : List[str] = jax.device_count()
__A : Any = pipe.prepare_text_inputs([prompts] * num_samples )
__A : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
__A : List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
__A : List[str] = jax.random.PRNGKey(0 )
__A : List[str] = jax.random.split(_A , jax.device_count() )
__A : int = replicate(_A )
__A : Optional[Any] = shard(_A )
__A : List[str] = shard(_A )
__A : str = pipe(
prompt_ids=_A , image=_A , params=_A , prng_seed=_A , num_inference_steps=50 , jit=_A , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__A : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__A : Dict = images[0, 253:256, 253:256, -1]
__A : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A : List[str] = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def test_pose(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
params["controlnet"] = controlnet_params
__A : Tuple = 'Chef in the kitchen'
__A : Optional[int] = jax.device_count()
__A : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples )
__A : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
__A : Dict = pipe.prepare_image_inputs([pose_image] * num_samples )
__A : int = jax.random.PRNGKey(0 )
__A : Dict = jax.random.split(_A , jax.device_count() )
__A : str = replicate(_A )
__A : Union[str, Any] = shard(_A )
__A : List[Any] = shard(_A )
__A : Any = pipe(
prompt_ids=_A , image=_A , params=_A , prng_seed=_A , num_inference_steps=50 , jit=_A , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__A : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__A : List[Any] = images[0, 253:256, 253:256, -1]
__A : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A : Optional[Any] = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 239 | 1 |
from math import sqrt
def _UpperCamelCase ( UpperCamelCase_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(UpperCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _UpperCamelCase ( UpperCamelCase_ : int = 1_0001 ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
while count != nth and number < 3:
number += 1
if is_prime(UpperCamelCase_ ):
count += 1
while count != nth:
number += 2
if is_prime(UpperCamelCase_ ):
count += 1
return number
if __name__ == "__main__":
print(f'{solution() = }')
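    # quick sanity checks, assuming the usual indexing (the 6th prime is 13;
    # 104743 is the well-known answer to Project Euler problem 7)
    assert solution(6) == 13
    assert solution() == 104_743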
| 720 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def _UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = get_dataset(UpperCamelCase_ , UpperCamelCase_ )
for index in range(UpperCamelCase_ ):
lowerCAmelCase__ = random.sample(range(len(UpperCamelCase_ ) ) , 4 )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = update_image_and_anno(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , filter_scale=UpperCamelCase_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase__ = random_chars(32 )
lowerCAmelCase__ = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
lowerCAmelCase__ = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
cva.imwrite(F"{file_root}.jpg" , UpperCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
lowerCAmelCase__ = []
for anno in new_annos:
lowerCAmelCase__ = anno[3] - anno[1]
lowerCAmelCase__ = anno[4] - anno[2]
lowerCAmelCase__ = anno[1] + width / 2
lowerCAmelCase__ = anno[2] + height / 2
lowerCAmelCase__ = F"{anno[0]} {x_center} {y_center} {width} {height}"
annos_list.append(UpperCamelCase_ )
with open(F"{file_root}.txt" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> tuple[list, list]:
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for label_file in glob.glob(os.path.join(UpperCamelCase_ , '*.txt' ) ):
lowerCAmelCase__ = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(UpperCamelCase_ ) as in_file:
lowerCAmelCase__ = in_file.readlines()
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , F"{label_name}.jpg" )
lowerCAmelCase__ = []
for obj_list in obj_lists:
lowerCAmelCase__ = obj_list.rstrip('\n' ).split(' ' )
lowerCAmelCase__ = float(obj[1] ) - float(obj[3] ) / 2
lowerCAmelCase__ = float(obj[2] ) - float(obj[4] ) / 2
lowerCAmelCase__ = float(obj[1] ) + float(obj[3] ) / 2
lowerCAmelCase__ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase_ )
labels.append(UpperCamelCase_ )
return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Stitch four images into one mosaic and rescale their annotations accordingly."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def _UpperCamelCase ( UpperCamelCase_ : int ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
lowerCAmelCase__ = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase_ ) for _ in range(UpperCamelCase_ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 365 | 0 |
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 142 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Configure the model for quantization-aware training or calibration."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in the model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Force the Q/K/V scale factors of each self-attention block to match, by taking their max."""

    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip the gelu output maximum value to the scalar `maxval`."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand a per-tensor amax to per-channel, assigning the per-tensor amax to each channel."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Recalibrate weight amaxes by taking the max of the weights, at the current quantization granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the quantization configuration of every weighted layer in the model."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's quantizer, warning if the quantizer is missing."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod's input and/or weight quantizers."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for every layer whose name matches a regex in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
| 142 | 1 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A_ : Optional[int] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A_ : int = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
A_ : Union[str, Any] = os.path.join(snake_case__ , snake_case__ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(snake_case__ , snake_case__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A_ : List[str] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
A_ : List[str] = os.path.join(snake_case__ , snake_case__ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(snake_case__ , snake_case__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A_ : str = os.path.join(snake_case__ , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(F"""Saving model to {ckpt_dir}""" )
A_ : Dict = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(snake_case__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
A_ : Tuple = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
A_ : Any = os.path.join(snake_case__ , snake_case__ )
logger.info(F"""Loading model from {input_model_file}""" )
A_ : List[Any] = torch.load(snake_case__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A_ : List[str] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
A_ : Any = os.path.join(snake_case__ , snake_case__ )
logger.info(F"""Loading model from {input_model_file}""" )
A_ : List[str] = torch.load(snake_case__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A_ : Optional[Any] = (
os.path.join(snake_case__ , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
A_ : List[str] = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , )
A_ : Tuple = state_dict["""model"""]
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(snake_case__ )
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A_ : Optional[Any] = FSDP.optim_state_dict(snake_case__ , snake_case__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
A_ : Any = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
A_ : Optional[int] = os.path.join(snake_case__ , snake_case__ )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(snake_case__ , snake_case__ )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
A_ : Union[str, Any] = os.path.join(snake_case__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
| 480 |
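The four helpers above pair up per FSDP state-dict flavor. A minimal usage sketch, assuming the reconstructed names `save_fsdp_model` / `load_fsdp_model` / `save_fsdp_optimizer` / `load_fsdp_optimizer` and an `Accelerator` launched with an FSDP config (this is an illustrative round-trip, not the library's documented API surface):

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()  # assumes FSDP is configured via `accelerate config`
model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)
fsdp_plugin = accelerator.state.fsdp_plugin

# save: model first, then the (possibly sharded) optimizer state
save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")

# load: mirrors the save calls and re-flattens the optimizer state per rank
load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
```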
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_lowerCAmelCase = "."
if __name__ == "__main__":
_lowerCAmelCase = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
_lowerCAmelCase = []
_lowerCAmelCase = []
with open(doctest_file_path) as fp:
for line in fp:
_lowerCAmelCase = line.strip()
_lowerCAmelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_lowerCAmelCase = "\n".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 480 | 1 |
'''simple docstring'''

import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images() -> list:
    '''simple docstring'''
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 72 |
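The two-entry `clusters` array above is the key to how ImageGPT tokenizes images: each normalized pixel is replaced by the index of its nearest color cluster, which is why the processor emits `input_ids` rather than `pixel_values`. A small self-contained sketch of that nearest-cluster quantization (plain NumPy, not the processor's actual internals):

```python
import numpy as np

# Two illustrative clusters in normalized RGB space, as in the test above.
clusters = np.asarray(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
    ]
)

pixels = np.random.uniform(-1.0, 1.0, size=(18 * 18, 3))  # flattened 18x18 image in [-1, 1]
# Squared Euclidean distance from every pixel to every cluster, then argmin.
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = distances.argmin(-1)  # shape (324,), values in {0, 1}
print(input_ids[:10])
```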
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 210 | 0 |
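For illustration, a short sketch of how this config's attribute aliasing behaves in standard `transformers` usage (values are examples only):

```python
from transformers import XLMConfig, XLMModel

config = XLMConfig(emb_dim=1024, n_layers=6, n_heads=8)
model = XLMModel(config)
# `hidden_size` is aliased to `emb_dim` through `attribute_map` above.
assert config.hidden_size == config.emb_dim == 1024
```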
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowerCAmelCase ( __snake_case ):
__lowerCAmelCase : Union[str, Any] = '''roc_bert'''
def __init__( self : int , a : int=30522 , a : Dict=768 , a : Optional[int]=12 , a : List[str]=12 , a : Optional[int]=3072 , a : int="gelu" , a : Tuple=0.1 , a : int=0.1 , a : Union[str, Any]=512 , a : List[Any]=2 , a : Tuple=0.02 , a : int=1E-12 , a : str=True , a : Any=0 , a : str="absolute" , a : Tuple=None , a : Tuple=True , a : Union[str, Any]=True , a : Optional[Any]=768 , a : Union[str, Any]=910 , a : Optional[int]=512 , a : Union[str, Any]=24858 , a : str=True , **a : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = type_vocab_size
lowercase = layer_norm_eps
lowercase = use_cache
lowercase = enable_pronunciation
lowercase = enable_shape
lowercase = pronunciation_embed_dim
lowercase = pronunciation_vocab_size
lowercase = shape_embed_dim
lowercase = shape_vocab_size
lowercase = concat_input
lowercase = position_embedding_type
lowercase = classifier_dropout
super().__init__(pad_token_id=a , **a ) | 396 |
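As a quick sanity check (standard `transformers` usage, not part of the file above), the extra RoC-BERT fields sit alongside the usual BERT ones:

```python
from transformers import RoCBertConfig

config = RoCBertConfig()
# RoC-BERT augments token embeddings with pronunciation and shape embeddings.
print(config.pronunciation_vocab_size, config.shape_vocab_size)  # 910 24858
```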
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def A_ ( __UpperCamelCase : str , __UpperCamelCase : dict ):
lowercase = BeautifulSoup(requests.get(__UpperCamelCase , params=__UpperCamelCase ).content , '''html.parser''' )
lowercase = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
lowercase = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2_018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params)) | 396 | 1 |
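Google Scholar throttles and re-renders aggressively, so scraping like this is brittle. A hedged variant that at least sets a browser-like User-Agent (plain `requests`, same endpoint as above; the params here are an illustrative subset):

```python
import requests

params = {"title": "Precisely geometry controlled microsupercapacitors", "hl": "en"}
headers = {"User-Agent": "Mozilla/5.0"}  # illustrative; Scholar may still block automated traffic
response = requests.get("https://scholar.google.com/scholar_lookup", params=params, headers=headers)
response.raise_for_status()
```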
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """size""" ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
_lowercase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowercase = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowercase = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
_lowercase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowercase = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowercase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values
_lowercase = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowercase = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowercase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values
_lowercase = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 39769, 'annotations': target}
# encode them
_lowercase = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
_lowercase = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors="""pt""" )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase_ )
_lowercase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5_887.9_600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase_ ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase_ )
_lowercase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase_ ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase_ ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase_ ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase_ ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase_ ) )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_lowercase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_lowercase = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
_lowercase = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors="""pt""" )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase_ )
_lowercase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase_ ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase_ )
_lowercase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase_ ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase_ ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase_ ) )
# verify masks
_lowercase = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase_ )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase_ ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase_ ) )
 | 398 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCamelCase_ : str=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[Any]="<mask_2>" , UpperCamelCase_ : int="<mask_1>" , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Optional[Any]=1_03 , **UpperCamelCase_ : Optional[Any] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Dict = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(UpperCamelCase_ )}, but is'''
f''' {type(UpperCamelCase_ )}''' )
SCREAMING_SNAKE_CASE__ :List[str] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(UpperCamelCase_ ) , self.offset - 1 )
]
if len(set(UpperCamelCase_ ) ) != len(UpperCamelCase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
SCREAMING_SNAKE_CASE__ :List[str] = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE__ :Tuple = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ :int = vocab_file
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False if not self.vocab_file else True
def __lowerCamelCase ( self : int , UpperCamelCase_ : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :int = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ :Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 209 | 0 |
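For context on the offset/`<unk_x>` bookkeeping above, this is how the fast Pegasus tokenizer is normally exercised (standard `transformers` API; the checkpoint name matches the maps above):

```python
from transformers import PegasusTokenizerFast

tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
enc = tokenizer("PEGASUS is pre-trained with gap sentences.")
# build_inputs_with_special_tokens appends only </s>; no BOS token is added.
assert enc["input_ids"][-1] == tokenizer.eos_token_id
```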
'''simple docstring'''


def solution() -> int:
    '''simple docstring'''
    # Find the product a*b*c of the Pythagorean triplet with a + b + c = 1000,
    # i.e. a^2 + b^2 = c^2 where c = 1000 - a - b.
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
 | 320 |
'''simple docstring'''
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: weight-0 edges go to the front of the deque, weight-1 edges to the back.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 320 | 1 |
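A tiny worked example of the 0-1 BFS class above (names as reconstructed; weight-0 edges are "free", which is what the deque front-insertion exploits):

```python
g = AdjacencyList(4)
g.add_edge(0, 1, 0)  # free edge
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
g.add_edge(2, 3, 0)

# 0 -> 1 costs 0, 1 -> 2 costs 1, 2 -> 3 costs 0, so the distance is 1.
print(g.get_shortest_path(0, 3))  # 1
```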
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    """simple docstring"""
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
 | 32 |
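Invocation sketch (paths are placeholders): the script is normally run from the command line, which is equivalent to calling the function directly:

```python
# python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#     --gpt2_checkpoint_path ./models/gpt2/model.ckpt \
#     --pytorch_dump_folder_path ./gpt2-pytorch
convert_gpt2_checkpoint_to_pytorch("./models/gpt2/model.ckpt", "", "./gpt2-pytorch")
```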
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """sshleifer/tiny-gpt2"""
lowerCamelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """sgugger/tiny-distilbert-classification"""
lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """sshleifer/tiny-gpt2"""
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , )
lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , """sequential""" ) )
self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) )
self.assertTrue(hasattr(__magic_name__ , """current""" ) )
self.assertTrue(hasattr(__magic_name__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
| 681 | 0 |
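Outside the test harness, the benchmark pattern these tests exercise looks like this (standard `transformers` benchmark API; model name and sizes mirror the tests and are illustrative):

```python
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)
```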
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _UpperCAmelCase( lowerCamelCase ):
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self._create_example_records()
_UpperCamelCase = Dataset.from_list(__a)
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
for i, r in enumerate(__a):
self.assertDictEqual(__a , example_records[i])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self._create_example_records()
_UpperCamelCase = Dataset.from_list(__a)
_UpperCamelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def UpperCAmelCase ( self) -> Optional[Any]: # checks what happens with missing columns
'''simple docstring'''
_UpperCamelCase = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
_UpperCamelCase = Dataset.from_list(__a)
self.assertDictEqual(dset[0] , {'''col_1''': 1})
self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
def UpperCAmelCase ( self) -> List[str]: # checks if the type can be inferred from the second record
'''simple docstring'''
_UpperCamelCase = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
_UpperCamelCase = Dataset.from_list(__a)
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = Dataset.from_list([])
self.assertEqual(len(__a) , 0)
self.assertListEqual(dset.column_names , [])
| 78 |
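The behavior under test is compact enough to show inline (standard `datasets` API):

```python
from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
# The first record fixes the schema, so col_2 is dropped and missing values become None.
print(dset.column_names)  # ['col_1']
print(dset[1])            # {'col_1': None}
```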
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """simple docstring"""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
def __init__( self , __a , __a = None , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ) -> Dict:
'''simple docstring'''
super().__init__(
__a , split=__a , features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , )
_UpperCamelCase = path_or_paths if isinstance(__a , __a) else {self.split: path_or_paths}
_UpperCamelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
_UpperCamelCase = Parquet(
cache_dir=__a , data_files=__a , features=__a , hash=__a , **__a , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
# Build iterable dataset
if self.streaming:
_UpperCamelCase = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , )
_UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__a , in_memory=self.keep_in_memory)
return dataset
class ParquetDatasetWriter:
def __init__( self , __a , __a , __a = None , **__a , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = dataset
_UpperCamelCase = path_or_buf
_UpperCamelCase = batch_size or get_writer_batch_size(dataset.features)
_UpperCamelCase = parquet_writer_kwargs
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with open(self.path_or_buf , '''wb+''') as buffer:
_UpperCamelCase = self._write(file_obj=__a , batch_size=__a , **self.parquet_writer_kwargs)
else:
_UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__a , **self.parquet_writer_kwargs)
return written
def UpperCAmelCase ( self , __a , __a , **__a) -> int:
'''simple docstring'''
_UpperCamelCase = 0
_UpperCamelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __a)
_UpperCamelCase = self.dataset.features.arrow_schema
_UpperCamelCase = pq.ParquetWriter(__a , schema=__a , **__a)
for offset in logging.tqdm(
range(0 , len(self.dataset) , __a) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
_UpperCamelCase = query_table(
table=self.dataset._data , key=slice(__a , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(__a)
written += batch.nbytes
writer.close()
return written
| 78 | 1 |
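The row-group sizing above matters because large image/audio/binary columns blow up Parquet row groups; in user code it is applied implicitly through the standard `datasets` round-trip, which routes through the writer class above:

```python
from datasets import Dataset

dset = Dataset.from_dict({"text": ["a", "b", "c"]})
# to_parquet uses ParquetDatasetWriter, which picks a batch size via get_writer_batch_size.
dset.to_parquet("out.parquet")
reloaded = Dataset.from_parquet("out.parquet")
```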
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
| 311 |
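The chain logic is easiest to see on the classic example 44 → 32 → 13 → 10 → 1, so 44's chain ends at 1 and `CHAINS[43]` becomes `True` (names as reconstructed above):

```python
assert next_number(44) == 4**2 + 4**2 == 32
assert next_number(32) == 13
assert next_number(13) == 10
assert next_number(10) == 1
assert chain(44) is True  # arrives at 1, not 89
```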
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
| 311 | 1 |
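Quick usage of the class above (names as reconstructed; coefficients are stored lowest power first, and `derivative`/`integral` follow the usual power rule):

```python
p = Polynomial(2, [1, 0, 3])      # 3x^2 + 1
q = Polynomial(1, [0, 2])         # 2x

print(p + q)                      # 3x^2 + 2x + 1
print(p.evaluate(2))              # 13
print(p.derivative())             # 6x
print(p.integral(constant=5))     # 1.0x^3 + 1.0x + 5
```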
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    '''simple docstring'''

    destination_vertex: int
    weight: int


class AdjacencyList:
    '''simple docstring'''

    def __init__(self, size: int) -> None:
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        '''simple docstring'''
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        '''simple docstring'''
        # Same 0-1 BFS as the earlier copy of this module: weight-0 edges are
        # pushed to the front of the deque, weight-1 edges to the back.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 291 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not trusted, loading a config with custom code raises.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=False
            )
        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=False
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=True
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 291 | 1 |
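Outside of unittest, the registration flow exercised above is just two calls. A minimal sketch, using a hypothetical `MyConfig` and `"my-model"` type that are not part of the tests:

```python
from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type, for illustration only

AutoConfig.register("my-model", MyConfig)   # maps the type string to the class
config = AutoConfig.for_model("my-model")   # the auto-API now resolves it
assert isinstance(config, MyConfig)
```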
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 100 |
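A typical call site for the version gate above, e.g. at the top of an example script; the version number here is illustrative:

```python
from transformers.utils import check_min_version

# Raises ImportError when the installed transformers is older than requested.
check_min_version("4.21.0")
```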
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : List[str] = logging.get_logger(__name__)
__magic_name__ : Tuple = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # Number of samples per chunk, or None when the model processes the full input at once.
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # Stride between chunks, derived from the configured overlap.
    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 281 | 0 |
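A small sketch of the derived properties above, using the defaults from the signature (24 kHz sampling rate, upsampling ratios [8, 5, 4, 2]):

```python
config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
assert config.chunk_length == 24_000   # 1.0 s * 24_000 Hz
assert config.frame_rate == 75         # ceil(24_000 / prod([8, 5, 4, 2])) = ceil(24_000 / 320)
```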
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = tmp_path / "cache"
snake_case__ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : Tuple = JsonDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ ).read()
_check_json_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A__ ( _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
snake_case__ : Tuple = tmp_path / "cache"
snake_case__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case__ : List[str] = features.copy() if features else default_expected_features
snake_case__ : Tuple = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : List[Any] = JsonDatasetReader(UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_json_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def A__ ( _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = tmp_path / "cache"
snake_case__ : Any = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
snake_case__ : Tuple = features.copy() if features else default_expected_features
snake_case__ : str = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : Optional[Any] = JsonDatasetReader(UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
snake_case__ : int = features.copy()
snake_case__ : Optional[Any] = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : Optional[Any] = tmp_path / "cache"
snake_case__ : str = JsonDatasetReader(UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = tmp_path / "cache"
snake_case__ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case__ : int = JsonDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , split=UpperCAmelCase__ ).read()
_check_json_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A__ ( _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
if issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case__ : Optional[Any] = jsonl_path
elif issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case__ : str = [jsonl_path]
snake_case__ : List[str] = tmp_path / "cache"
snake_case__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case__ : Optional[Any] = JsonDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_json_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
def A__ ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=("train",) ) -> Dict:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
for split in splits:
snake_case__ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = tmp_path / "cache"
snake_case__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : List[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ ).read()
_check_json_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A__ ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = tmp_path / "cache"
snake_case__ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case__ : Dict = features.copy() if features else default_expected_features
snake_case__ : Union[str, Any] = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_json_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
if split:
snake_case__ : Tuple = {split: jsonl_path}
else:
snake_case__ : Optional[Any] = "train"
snake_case__ : Any = {"train": jsonl_path, "test": jsonl_path}
snake_case__ : Any = tmp_path / "cache"
snake_case__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
snake_case__ : Optional[Any] = JsonDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_json_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( _UpperCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
return json.load(UpperCAmelCase__ )
def A__ ( _UpperCAmelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
return [json.loads(UpperCAmelCase__ ) for line in buffer]
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)])
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , lines=_A).write()
buffer.seek(0)
snake_case__ : Union[str, Any] = load_json_function(_A)
assert isinstance(_A , _A)
assert isinstance(exported_content[0] , _A)
assert len(_A) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789"), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) -> Dict:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , lines=_A , orient=_A).write()
buffer.seek(0)
snake_case__ : Tuple = load_json(_A)
assert isinstance(_A , _A)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_A , "keys") and not hasattr(exported_content[0] , "keys")
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(_A) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)])
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) -> List[str]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , lines=_A , num_proc=2).write()
buffer.seek(0)
snake_case__ : List[Any] = load_json_function(_A)
assert isinstance(_A , _A)
assert isinstance(exported_content[0] , _A)
assert len(_A) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789"), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) -> Dict:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , lines=_A , orient=_A , num_proc=2).write()
buffer.seek(0)
snake_case__ : str = load_json(_A)
assert isinstance(_A , _A)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_A , "keys") and not hasattr(exported_content[0] , "keys")
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(_A) == 10
def UpperCAmelCase ( self , lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
with pytest.raises(_A):
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , num_proc=0)
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = tmp_path_factory.mktemp("data") / f"""test.json.{extension}"""
snake_case__ : str = str(shared_datadir / f"""test_file.json.{extension}""")
JsonDatasetWriter(_A , _A , compression=_A).write()
with fsspec.open(_A , "rb" , compression="infer") as f:
snake_case__ : List[str] = f.read()
with fsspec.open(_A , "rb" , compression="infer") as f:
snake_case__ : Dict = f.read()
assert exported_content == original_content
| 714 |
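A minimal round trip with the same reader and writer classes, sketched from the tests above; the file name is arbitrary:

```python
from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
JsonDatasetWriter(ds, "roundtrip.jsonl", lines=True).write()  # one JSON object per line
reloaded = JsonDatasetReader("roundtrip.jsonl").read()
assert reloaded.column_names == ["col_1", "col_2"]
```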
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 150 | 0 |
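The `_import_structure` dict above only pays off because of the lazy module: importing the package object is cheap, and heavy submodules load on first attribute access. A sketch of the observable behavior, assuming transformers is installed:

```python
# _LazyModule records _import_structure but does not import modeling code yet.
import transformers.models.convbert as convbert

# The first attribute access triggers the real import of the owning submodule.
config_cls = convbert.ConvBertConfig
print(config_cls.model_type)  # "convbert"
```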
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run a candidate program in a subprocess and report pass/fail within a timeout."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises when read from, so tested code cannot consume stdin."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns False so this stream is never treated as readable."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions (process control, file removal, ...) before
    running untrusted generated code. WARNING: this is not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 255 |
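A usage sketch of the runner above, with the function name as reconstructed here; the program string is a toy example:

```python
program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
result = check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
assert result["passed"] and result["result"] == "passed"
```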
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), s the semi-perimeter.
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"Rectangle: {area_rectangle(1_0, 2_0) = }")
print(f"Square: {area_square(1_0) = }")
print(f"Triangle: {area_triangle(1_0, 1_0) = }")
print(f"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }")
print(f"Parallelogram: {area_parallelogram(1_0, 2_0) = }")
print(f"Rhombus: {area_rhombus(1_0, 2_0) = }")
print(f"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }")
print(f"Circle: {area_circle(2_0) = }")
print(f"Ellipse: {area_ellipse(1_0, 2_0) = }")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"Cube: {surface_area_cube(2_0) = }")
print(f"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }")
print(f"Sphere: {surface_area_sphere(2_0) = }")
print(f"Hemisphere: {surface_area_hemisphere(2_0) = }")
print(f"Cone: {surface_area_cone(1_0, 2_0) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }")
print(f"Cylinder: {surface_area_cylinder(1_0, 2_0) = }")
print(f"Torus: {surface_area_torus(2_0, 1_0) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }")
print(f"Square: {area_reg_polygon(4, 1_0) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
| 255 | 1 |
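A quick worked check of Heron's formula as implemented above, using the 5-12-13 right triangle:

```python
# s = (5 + 12 + 13) / 2 = 15, area = sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30,
# which matches the legs-based formula (5 * 12) / 2 = 30.
assert area_triangle_three_sides(5, 12, 13) == area_triangle(5, 12) == 30.0
```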
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass | 713 |
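A sketch of the same pipeline outside the test harness, using the tiny test checkpoint from above; the video path is a placeholder:

```python
from transformers import pipeline

classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
)
# Returns a list of {"score": float, "label": str} dicts, highest score first.
predictions = classifier("archery.mp4", top_k=2)  # placeholder local file
```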
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def __lowerCAmelCase ( self ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_config()
_lowerCAmelCase = 300
return config
def __lowerCAmelCase ( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = True
_lowerCAmelCase = MraModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = ()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
@slow
def __lowerCAmelCase ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = MraModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def __lowerCAmelCase ( self ):
return
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
_lowerCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
_lowerCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = 50_265
_lowerCAmelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
_lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = 50_265
_lowerCAmelCase = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) | 664 | 0 |
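A usage sketch of the checkpoint the integration tests above load; the shapes follow the first integration test:

```python
import torch
from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)  # batch of one 256-token sequence
with torch.no_grad():
    hidden_states = model(input_ids)[0]
assert hidden_states.shape == torch.Size((1, 256, 768))
```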
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 91 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty (not necessarily contiguous) subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the best sum so far, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : List[str] = int(input("Enter number of elements : ").strip())
UpperCAmelCase_ : Tuple = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 21 | 0 |
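# Quick sanity checks for max_subsequence_sum above: with mixed signs the best
# (not necessarily contiguous) subsequence keeps exactly the positive terms, and for an
# all-negative input the best choice is the single largest element:
assert max_subsequence_sum([3, -5, 4, 2]) == 9   # 3 + 4 + 2
assert max_subsequence_sum([-4, -2, -7]) == -2   # best single element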
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE: List[str] = model_class(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE: Optional[int] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE: Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE: int = model_class(config=_lowerCAmelCase )
for name, module in model.named_modules():
if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def snake_case_ ( self ):
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Dict = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE: Tuple = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__SCREAMING_SNAKE_CASE: List[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE: Any = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__SCREAMING_SNAKE_CASE: Optional[int] = layer_type
__SCREAMING_SNAKE_CASE: Any = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE: List[Any] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
| 146 |
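# The backbone checks above assert a contract: model.channels mirrors the hidden sizes
# selected by out_indices, and each returned feature map has that many channels. A toy
# stand-in (not the real BitBackbone) that satisfies the same contract:
import torch
from torch import nn


class ToyBackbone(nn.Module):
    def __init__(self, hidden_sizes, out_indices=None):
        super().__init__()
        self.hidden_sizes = hidden_sizes
        # default mirrors out_features=None above: expose only the last stage
        self.out_indices = out_indices if out_indices is not None else [len(hidden_sizes) - 1]
        self.channels = [hidden_sizes[i] for i in self.out_indices]

    def forward(self, pixel_values):
        batch = pixel_values.shape[0]
        # pretend each stage halves an 8x8 grid; only the shapes matter for this sketch
        maps = [torch.zeros(batch, c, 8 // 2**i, 8 // 2**i) for i, c in enumerate(self.hidden_sizes)]
        return [maps[i] for i in self.out_indices]


backbone = ToyBackbone([8, 16, 32, 64], out_indices=[1, 2, 3])
feature_maps = backbone(torch.zeros(2, 3, 32, 32))
assert [f.shape[1] for f in feature_maps] == backbone.channels == [16, 32, 64]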
from math import ceil


def assert_device_map(device_map, num_blocks):
    """Validate that a device_map assigns every attention block to exactly one device."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(extra_blocks))


def get_device_map(n_layers, devices):
    """Split n_layers evenly across the given devices, returning {device: [layer indices]}."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
| 146 | 1 |
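# How the two helpers above fit together: get_device_map splits layers evenly across
# devices, and assert_device_map validates that result (or a hand-written map) against
# the model's true block count:
device_map = get_device_map(n_layers=10, devices=[0, 1, 2])
print(device_map)  # {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9]}
assert_device_map(device_map, num_blocks=10)  # raises ValueError on gaps or duplicates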
import math


def is_prime(number: int) -> bool:
    """Determine whether the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 313 |
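# Sanity checks for the helpers above: the first six primes are 2, 3, 5, 7, 11, 13,
# so solution(6) must return 13; the default call returns the 10001st prime.
assert is_prime(13) and not is_prime(15)
assert solution(6) == 13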
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map a fairseq MAE parameter name to its transformers ViTMAE equivalent."""
    if "cls_token" in name:
        name = name.replace("""cls_token""", """vit.embeddings.cls_token""")
    if "mask_token" in name:
        name = name.replace("""mask_token""", """decoder.mask_token""")
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""", """decoder.decoder_pos_embed""")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""", """vit.embeddings.position_embeddings""")
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""", """vit.embeddings.patch_embeddings.projection""")
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""", """vit.embeddings.norm""")
    if "decoder_blocks" in name:
        name = name.replace("""decoder_blocks""", """decoder.decoder_layers""")
    if "blocks" in name:
        name = name.replace("""blocks""", """vit.encoder.layer""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "attn" in name:
        name = name.replace("""attn""", """attention.self""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""", """decoder.decoder_embed""")
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""", """decoder.decoder_norm""")
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""", """decoder.decoder_pred""")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("""norm.weight""", """vit.layernorm.weight""")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("""norm.bias""", """vit.layernorm.bias""")
    return name
def convert_state_dict(orig_state_dict, config):
    """Split fused qkv projections into query/key/value and rename all other keys."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(""".""")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = """vit.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 12_80
        config.intermediate_size = 51_20
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""model"""]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="""pt""")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 207 | 0 |
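# The conversion above splits each fused qkv projection of shape (3*dim, dim) into
# query/key/value weights by row slices. A tiny numeric illustration of that slicing:
import torch

dim = 2
qkv = torch.arange(3 * dim * dim).reshape(3 * dim, dim)  # stand-in fused weight
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)
assert torch.equal(torch.cat([q, k, v], dim=0), qkv)  # the slices tile the original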
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def __A ( self ):
A__ = "<s>"
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def __A ( self ):
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(UpperCAmelCase__ ) , 1_000 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __A ( self ):
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = "I was born in 92000, and this is falsé."
A__ = tokenizer.tokenize(UpperCAmelCase__ )
A__ = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
A__ = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(UpperCAmelCase__ )
A__ = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# Simple input
A__ = "This is a simple input"
A__ = ["This is a simple input 1", "This is a simple input 2"]
A__ = ("This is a simple input", "This is a pair")
A__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding="max_length" )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding="max_length" )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding="max_length" , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding="max_length" )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding="max_length" )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding="max_length" , )
def __A ( self ):
pass
def __A ( self ):
tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
A__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def __A ( self ):
A__ = "Hello World!"
A__ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def __A ( self ):
A__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
A__ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def __A ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
A__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
A__ = " ".join(UpperCAmelCase__ )
A__ = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors="pt" )
A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
A__ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
A__ = encoded_sequence["input_ids"].shape
A__ = ReformerModel(UpperCAmelCase__ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def __A ( self ):
# fmt: off
A__ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
A__ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 718 |
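# The expected tokens in the tests above use SentencePiece's convention of marking word
# starts with the "▁" (U+2581) meta symbol and emitting <unk> for unseen characters. A
# minimal sketch of undoing that marking during detokenization (pure string handling,
# no model file needed; the constant is redefined locally so the sketch is standalone):
SPIECE_UNDERLINE = "\u2581"

pieces = [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "born"]
text = "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()
assert text == "I was born"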
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 256,
}
CONTROL_CODES = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
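# For example, the word ("h", "e", "l", "l", "o") yields exactly the adjacent pairs that
# the BPE merge loop below looks up in its rank table:
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}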
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
@property
    def vocab_size(self):
return len(self.encoder )
    def get_vocab(self):
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the adjacent pair with the lowest merge rank first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(R"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 232 | 0 |
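# The bpe() method above repeatedly merges the adjacent pair with the lowest merge
# rank. A toy re-implementation of just that loop (no "</w>" suffix, caching, or "@@ "
# re-joining) with a made-up two-entry merge table:
def toy_bpe(word, bpe_ranks):
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if best not in bpe_ranks:
            break  # no known merge applies anymore
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(word[i] + word[i + 1])  # apply the merge
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word


# "l"+"o" merges first (rank 0), then "lo"+"w" (rank 1):
assert toy_bpe(("l", "o", "w"), {("l", "o"): 0, ("lo", "w"): 1}) == ("low",)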
from bisect import bisect
from itertools import accumulate


def frac_knapsack(values, weights, capacity, count):
    """Greedy fractional knapsack: take items in decreasing value/weight ratio,
    splitting the last item if it does not fit entirely."""
    items = sorted(zip(values, weights), key=lambda item: item[0] / item[1], reverse=True)
    values, weights = [i[0] for i in items], [i[1] for i in items]
    acc = list(accumulate(weights))
    k = bisect(acc, capacity)
    return (
        0
        if k == 0
        else sum(values[:k]) + (capacity - acc[k - 1]) * (values[k]) / (weights[k])
        if k != count
        else sum(values[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 647 |
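# Classic worked example for frac_knapsack above: values [60, 100, 120] and weights
# [10, 20, 30] with capacity 50 -> take the first two items whole (value 160) plus
# 20/30 of the third (value 80), for 240 total:
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0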
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_gpt_bigcode'] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 647 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Scale (height, width) down by scale_factor**2 with ceiling rounding, then back up by scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(F"""cuda:{gpu_id}""")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(F"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : List[Any] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : int = 5_1_2 , UpperCAmelCase__ : int = 5_1_2 , UpperCAmelCase__ : int = 1_0_0 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ) -> str:
__SCREAMING_SNAKE_CASE = self._execution_device
__SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase__ , dim=0 )
__SCREAMING_SNAKE_CASE = image_embeds.shape[0] * num_images_per_prompt
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase__ , dim=0 )
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
__SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
__SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.scheduler.timesteps
__SCREAMING_SNAKE_CASE = self.unet.config.in_channels
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
# create initial latent
__SCREAMING_SNAKE_CASE = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE = {"image_embeds": image_embeds}
__SCREAMING_SNAKE_CASE = self.unet(
sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
__SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE = self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
# post-processing
__SCREAMING_SNAKE_CASE = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
__SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 553 |
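# The denoising loop above applies classifier-free guidance:
#     noise_pred = uncond + guidance_scale * (text - uncond)
# i.e. it extrapolates past the conditioned prediction. A tiny numeric illustration
# with made-up values:
import torch

noise_pred_uncond = torch.tensor([0.1, 0.2])
noise_pred_text = torch.tensor([0.3, 0.0])
guidance_scale = 4.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(guided, torch.tensor([0.9, -0.6]))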
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
    pad_to_max_length: bool = field(
        default=False, metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
"""simple docstring"""
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
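# The collator above flattens (batch_size, num_choices) examples into one long batch so
# the tokenizer can pad them together, then reshapes back for the model. A shape-only
# sketch of that round trip with toy tensors:
import torch

batch_size, num_choices, seq_len = 2, 4, 5
flat = torch.zeros(batch_size * num_choices, seq_len)  # what tokenizer.pad produces
unflat = flat.view(batch_size, num_choices, -1)        # what the model consumes
assert unflat.shape == (2, 4, 5)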
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
datasets.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__SCREAMING_SNAKE_CASE = {}
if data_args.train_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file
if data_args.validation_file is not None:
__SCREAMING_SNAKE_CASE = data_args.validation_file
__SCREAMING_SNAKE_CASE = data_args.train_file.split("." )[-1]
__SCREAMING_SNAKE_CASE = load_dataset(
lowerCAmelCase_ , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__SCREAMING_SNAKE_CASE = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"""ending{i}""" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
if data_args.max_seq_length is None:
__SCREAMING_SNAKE_CASE = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
__SCREAMING_SNAKE_CASE = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs); the spawned process index is required but unused.
    main()
if __name__ == "__main__":
main()
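# ---------------------------------------------------------------------------
# Editor's sketch (added, not part of the original script): the flatten /
# un-flatten trick from `preprocess_function`, shown in isolation. The toy
# `fake_tokenize` stands in for a real tokenizer call and is hypothetical.
from itertools import chain as _chain


def fake_tokenize(firsts, seconds):
    # Stand-in for `tokenizer(firsts, seconds, ...)`: one "encoding" per pair.
    return {"input_ids": [f"{a} | {b}" for a, b in zip(firsts, seconds)]}


def group_by_choices(examples, num_choices=4):
    # 1) Repeat each context once per candidate ending, and pair it with the
    #    question header + ending text.
    contexts = [[context] * num_choices for context in examples["sent1"]]
    endings = [
        [f"{examples['sent2'][i]} {examples['ending' + str(j)][i]}" for j in range(num_choices)]
        for i in range(len(examples["sent1"]))
    ]
    # 2) Flatten so the tokenizer sees independent sentence pairs.
    flat_firsts = list(_chain(*contexts))
    flat_seconds = list(_chain(*endings))
    tokenized = fake_tokenize(flat_firsts, flat_seconds)
    # 3) Un-flatten: every consecutive run of `num_choices` rows is one example.
    return {k: [v[i : i + num_choices] for i in range(0, len(v), num_choices)] for k, v in tokenized.items()}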
| 553 | 1 |
import socket
def main() -> None:
    """Connect to a local server and save the bytes it streams into a file."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP socket
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
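# ---------------------------------------------------------------------------
# Editor's sketch (added, not part of the original sample): a minimal matching
# server so the client above can be exercised locally. The file name, host and
# port are assumptions chosen to mirror the client.
def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        chunk = in_file.read(1024)
        while chunk:
            conn.send(chunk)
            chunk = in_file.read(1024)
    conn.close()
    server.close()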
| 15 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    """PoolFormer model configuration."""

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
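# ---------------------------------------------------------------------------
# Editor's usage sketch (added, not part of the original module): the default
# values describe the s12 variant (depths 2 + 2 + 6 + 2 = 12 blocks).
#
#     config = PoolFormerConfig(hidden_sizes=[96, 192, 384, 768])  # scale up
#     onnx_config = PoolFormerOnnxConfig(config)
#     print(onnx_config.inputs)   # OrderedDict([('pixel_values', {...})])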
| 15 | 1 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: the speed of sound in a fluid is
    c = sqrt(K / rho), with bulk modulus K (Pa) and density rho (kg/m^3).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
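    # Editor's example (added): water at room temperature, using textbook
    # approximations K ≈ 2.15e9 Pa and rho ≈ 998 kg/m^3 (not values from the
    # original file):
    print(f"{speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9):.1f} m/s")  # ≈ 1467.8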
| 653 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
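# ---------------------------------------------------------------------------
# Editor's usage sketch (added, not part of the original module): the defaults
# above correspond to the base-sized Chinese checkpoint; any field can be
# overridden by keyword, and the config round-trips through JSON.
#
#     config = NezhaConfig(hidden_dropout_prob=0.2)
#     config.save_pretrained("my-nezha-config")        # writes config.json
#     config = NezhaConfig.from_pretrained("my-nezha-config")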
| 653 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Union[str, Any] = GPTSanJapaneseTokenizer
snake_case__ : Dict = False
snake_case__ : Tuple = {"do_clean_text": False, "add_prefix_space": False}
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
_lowerCamelCase : List[Any] = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
_lowerCamelCase : Dict = {'''unk_token''': '''<unk>'''}
_lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : int , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
_lowerCamelCase : str = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : int = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
_lowerCamelCase : List[Any] = '''こんにちは、世界。 こんばんは、㔺界。'''
_lowerCamelCase : Tuple = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
_lowerCamelCase : Tuple = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
_lowerCamelCase : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_lowerCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
_lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = self.get_tokenizer()
# Testing tokenization
_lowerCamelCase : List[Any] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
_lowerCamelCase : List[str] = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
_lowerCamelCase : Dict = tokenizer.encode(__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
_lowerCamelCase : int = '''こんにちは、世界。'''
_lowerCamelCase : Tuple = '''こんばんは、㔺界。😀'''
_lowerCamelCase : str = '''こんにちは、世界。こんばんは、世界。😀'''
_lowerCamelCase : List[Any] = tokenizer.encode(prefix_text + input_text )
_lowerCamelCase : List[Any] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
_lowerCamelCase : int = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
_lowerCamelCase : int = '''こんにちは、世界。'''
_lowerCamelCase : List[str] = '''こんばんは、㔺界。😀'''
_lowerCamelCase : Optional[int] = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
_lowerCamelCase : Optional[int] = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
_lowerCamelCase : Optional[Any] = [1] + [0] * (len_prefix + len_text + 1)
_lowerCamelCase : Any = [1] * (len_prefix + len_text + 1) + [0]
_lowerCamelCase : Optional[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_lowerCamelCase : int = tokenizer(prefix_text + input_text ).token_type_ids
_lowerCamelCase : Union[str, Any] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
_lowerCamelCase : List[Any] = tokenizer.encode('''あンいワ''' )
_lowerCamelCase : Optional[int] = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
_lowerCamelCase : Any = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
_lowerCamelCase : Optional[Any] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
_lowerCamelCase : Any = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
_lowerCamelCase : Optional[int] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
_lowerCamelCase : Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_lowerCamelCase : Optional[int] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
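# ---------------------------------------------------------------------------
# Editor's note (added): the tests above exercise GPTSanJapaneseTokenizer's
# `prefix_text` argument, which splits one sequence into a prefix segment and
# a text segment and reports the split via `token_type_ids` (1 on the prefix
# positions, 0 afterwards). A minimal sketch reusing only calls seen above:
#
#     tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#     enc = tokenizer("いワ", prefix_text="あン")
#     print(enc.token_type_ids)   # prefix positions flagged with 1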
| 83 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    # One distinct value per process: process p holds [n*p + 1 .. n*p + n],
    # which makes the collective results easy to assert on.
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
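# ---------------------------------------------------------------------------
# Editor's note (added): the asserts above only exercise the collectives when
# several processes exist, so run the script under a distributed launcher,
# e.g. (illustrative command):
#
#     accelerate launch --num_processes 2 test_ops.py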
| 454 | 0 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
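# ---------------------------------------------------------------------------
# Editor's note (added): example invocation. The script file name and every
# path below are placeholders, not values from the original repository.
#
#     python convert_speecht5_checkpoint.py \
#         --task s2t \
#         --checkpoint_path ./speecht5_asr.pt \
#         --vocab_path ./spm_char.model \
#         --pytorch_dump_folder_path ./speecht5-asr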
import csv
import tweepy
# Twitter API credentials
a = ""
a = ""
a = ""
a = ""
def _SCREAMING_SNAKE_CASE ( snake_case ) -> None:
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(snake_case , snake_case )
auth.set_access_token(snake_case , snake_case )
_UpperCAmelCase = tweepy.API(snake_case )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=snake_case , count=2_0_0 )
# save most recent tweets
alltweets.extend(snake_case )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(snake_case ) > 0:
print(f"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=snake_case , count=2_0_0 , max_id=snake_case )
# save most recent tweets
alltweets.extend(snake_case )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f"...{len(snake_case )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"new_{screen_name}_tweets.csv" , """w""" ) as f:
_UpperCAmelCase = csv.writer(snake_case )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(snake_case )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32") | 175 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step / max_step) after which the complex
    number made up of this x-y pair diverges. Members of the Mandelbrot set
    do not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: points inside the set (distance 1) are black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coded coloring: the relative distance is mapped to an HSV hue."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
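# ---------------------------------------------------------------------------
# Editor's note (added): the loop in get_distance is the escape-time iteration
# z_{n+1} = z_n^2 + c written in real coordinates with z = a + bi, c = x + yi:
#     a_new = a^2 - b^2 + x    and    b_new = 2ab + y
# Two quick sanity checks (values verified by hand; safe to uncomment):
#
#     assert get_distance(0, 0, 50) == 1.0   # c = 0 never diverges
#     assert get_distance(1, 0, 50) < 0.1    # c = 1 escapes after ~2 steps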
| 620 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( snake_case , snake_case , unittest.TestCase ):
__lowerCamelCase: Tuple = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowerCamelCase: Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowerCamelCase: Union[str, Any] = False
__lowerCamelCase: Any = False
__lowerCamelCase: List[str] = False
__lowerCamelCase: Optional[Any] = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = TFDeiTModelTester(self )
lowercase_ : Any = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , tf.keras.layers.Dense ) )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Any = model_class(a )
lowercase_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : int = [*signature.parameters.keys()]
lowercase_ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def lowerCAmelCase__ ( self : Any , a : int , a : Union[str, Any] , a : Any=False ):
'''simple docstring'''
lowercase_ : Union[str, Any] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = TFDeiTModel.from_pretrained(a )
self.assertIsNotNone(a )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
lowercase_ : Union[str, Any] = self.default_image_processor
lowercase_ : Tuple = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=a , return_tensors="tf" )
# forward pass
lowercase_ : int = model(**a )
# verify the logits
lowercase_ : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase_ : Optional[Any] = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
| 620 | 1 |
def ugly_numbers(n: int) -> int:
    """
    Return the n-th ugly number: a positive integer whose only prime factors
    are 2, 3 or 5. The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
| 707 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
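# ---------------------------------------------------------------------------
# Editor's note (added): `_LazyModule` defers the heavy torch/vision imports
# until an attribute is first touched, so importing the package stays cheap
# (sketch only):
#
#     from transformers.models import glpn   # cheap: nothing loaded yet
#     config_cls = glpn.GLPNConfig            # triggers the real import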
| 608 | 0 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
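# ---------------------------------------------------------------------------
# Editor's example (added): set exactly one of the three quantities to 0 and
# the function solves for it from sigma = n * q * mu. Numbers are illustrative.
#
#     electric_conductivity(conductivity=1000, electron_conc=0, mobility=1200)
#     # -> ("electron_conc", 1000 / (1200 * ELECTRON_CHARGE))  # ≈ 5.2e18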
| 392 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
snake_case__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
snake_case__ : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
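# ---------------------------------------------------------------------------
# Editor's note (added): typical release flow (commands illustrative, run from
# the repository root):
#
#     python utils/release.py                 # bump to the next minor release
#     python utils/release.py --patch         # bump for a patch release
#     python utils/release.py --post_release  # reopen the .dev0 version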
| 392 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : List[Any]="<s>" , __snake_case : str="</s>" , __snake_case : List[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Any="<unk>" , __snake_case : str="<pad>" , __snake_case : Dict="<mask>" , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Dict , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Optional[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
UpperCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
UpperCAmelCase : Dict = vocab_file
UpperCAmelCase : Union[str, Any] = monolingual_vocab_file
UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCAmelCase : List[str] = {}
UpperCAmelCase : Any = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__snake_case ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase : Any = cnt
cnt += 1
with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
UpperCAmelCase : Tuple = line.strip().split()[0]
UpperCAmelCase : Any = len(self.fairseq_tokens_to_ids )
if str(__snake_case ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase : Optional[int] = len(self.fairseq_tokens_to_ids )
UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict ) -> str:
UpperCAmelCase : str = self.__dict__.copy()
UpperCAmelCase : Tuple = None
UpperCAmelCase : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , __snake_case : Any ) -> str:
UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase : str = {}
UpperCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Optional[Any] = [self.cls_token_id]
UpperCAmelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def A ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A ( self : Optional[int] ) -> Dict:
return len(self.fairseq_ids_to_tokens )
def A ( self : Any ) -> Union[str, Any]:
UpperCAmelCase : Dict = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self : int , __snake_case : str ) -> List[str]:
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def A ( self : List[str] , __snake_case : Optional[int] ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def A ( self : Dict , __snake_case : List[Any] ) -> Optional[int]:
return self.fairseq_ids_to_tokens[index]
def A ( self : str , __snake_case : Optional[int] ) -> int:
UpperCAmelCase : Optional[Any] = ''''''.join(__snake_case ).replace(__snake_case , ''' ''' ).strip()
return out_string
def A ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : Tuple = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase : List[Any] = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , '''wb''' ) as fi:
UpperCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__snake_case ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __snake_case )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(__snake_case )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 704 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : Union[str, Any] ) -> Any:
UpperCAmelCase : Optional[int] = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase : int = AutoTokenizer.from_pretrained(__snake_case )
UpperCAmelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained(__snake_case )
UpperCAmelCase : Optional[Any] = tokenizer('''This is me''' , return_tensors='''pt''' )
UpperCAmelCase : Union[str, Any] = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCAmelCase : Dict = model.generate(**__snake_case )
UpperCAmelCase : Union[str, Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__snake_case )
UpperCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(__snake_case )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCAmelCase : Dict = model_reloaded.generate(**__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case ) )
def A ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : Dict = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(__snake_case )
UpperCAmelCase : List[Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__snake_case ):
model.save_pretrained(__snake_case )
UpperCAmelCase : Any = model.reverse_bettertransformer()
model.save_pretrained(__snake_case )
| 528 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase_ ( lowerCAmelCase__ , unittest.TestCase ):
a__ = ShapEImgaImgPipeline
a__ = ['''image''']
a__ = ['''image''']
a__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
a__ = False
@property
def A ( self ):
"""simple docstring"""
return 3_2
@property
def A ( self ):
"""simple docstring"""
return 3_2
@property
def A ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A ( self ):
"""simple docstring"""
return 8
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ :Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__magic_name__ :Union[str, Any] = CLIPVisionModel(__UpperCAmelCase )
return model
@property
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ :Optional[int] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__magic_name__ :str = PriorTransformer(**__UpperCAmelCase )
return model
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ :int = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
__magic_name__ :List[str] = ShapERenderer(**__UpperCAmelCase )
return model
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.dummy_prior
__magic_name__ :List[Any] = self.dummy_image_encoder
__magic_name__ :str = self.dummy_image_processor
__magic_name__ :Dict = self.dummy_renderer
__magic_name__ :int = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , )
__magic_name__ :Dict = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def A ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
__magic_name__ :List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
if str(__UpperCAmelCase ).startswith('''mps''' ):
__magic_name__ :List[Any] = torch.manual_seed(__UpperCAmelCase )
else:
__magic_name__ :Tuple = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__magic_name__ :List[str] = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = '''cpu'''
__magic_name__ :Dict = self.get_dummy_components()
__magic_name__ :int = self.pipeline_class(**__UpperCAmelCase )
__magic_name__ :Union[str, Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__magic_name__ :Any = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
__magic_name__ :str = output.images[0]
__magic_name__ :Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
__magic_name__ :List[str] = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A ( self ):
"""simple docstring"""
__magic_name__ :str = torch_device == '''cpu'''
__magic_name__ :Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.get_dummy_components()
__magic_name__ :Dict = self.pipeline_class(**__UpperCAmelCase )
__magic_name__ :Any = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__magic_name__ :Tuple = 1
__magic_name__ :str = 2
__magic_name__ :str = self.get_dummy_inputs(__UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__magic_name__ :Tuple = batch_size * [inputs[key]]
__magic_name__ :Tuple = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
__magic_name__ :Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
__magic_name__ :Any = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
__magic_name__ :Tuple = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__magic_name__ :Dict = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
__magic_name__ :List[Any] = pipe(
__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 0 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def a__ ( _UpperCamelCase : List[str] ):
__lowerCamelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__lowerCamelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__lowerCamelCase = 4
__lowerCamelCase = 48
__lowerCamelCase = '''pixelshuffle_aux'''
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__lowerCamelCase = [6, 6, 6, 6]
__lowerCamelCase = 60
__lowerCamelCase = [6, 6, 6, 6]
__lowerCamelCase = '''pixelshuffledirect'''
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__lowerCamelCase = 4
__lowerCamelCase = '''nearest+conv'''
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__lowerCamelCase = 1
__lowerCamelCase = 1
__lowerCamelCase = 1_26
__lowerCamelCase = 7
__lowerCamelCase = 255.0
__lowerCamelCase = ''''''
return config
def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ):
if "patch_embed.proj" in name and "layers" not in name:
__lowerCamelCase = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('''patch_embed.norm''' ,'''embeddings.patch_embeddings.layernorm''' )
if "layers" in name:
__lowerCamelCase = name.replace('''layers''' ,'''encoder.stages''' )
if "residual_group.blocks" in name:
__lowerCamelCase = name.replace('''residual_group.blocks''' ,'''layers''' )
if "attn.proj" in name:
__lowerCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
__lowerCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
__lowerCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
__lowerCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "q_bias" in name:
__lowerCamelCase = name.replace('''q_bias''' ,'''query.bias''' )
if "k_bias" in name:
__lowerCamelCase = name.replace('''k_bias''' ,'''key.bias''' )
if "v_bias" in name:
__lowerCamelCase = name.replace('''v_bias''' ,'''value.bias''' )
if "cpb_mlp" in name:
__lowerCamelCase = name.replace('''cpb_mlp''' ,'''continuous_position_bias_mlp''' )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('''patch_embed.proj''' ,'''patch_embed.projection''' )
if name == "norm.weight":
__lowerCamelCase = '''layernorm.weight'''
if name == "norm.bias":
__lowerCamelCase = '''layernorm.bias'''
if "conv_first" in name:
__lowerCamelCase = name.replace('''conv_first''' ,'''first_convolution''' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__lowerCamelCase = name.replace('''conv_last''' ,'''final_convolution''' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__lowerCamelCase = name.replace('''conv_before_upsample.0''' ,'''conv_before_upsample''' )
if "upsample.0" in name:
__lowerCamelCase = name.replace('''upsample.0''' ,'''upsample.convolution_0''' )
if "upsample.2" in name:
__lowerCamelCase = name.replace('''upsample.2''' ,'''upsample.convolution_1''' )
__lowerCamelCase = '''upsample.''' + name
elif config.upsampler == "pixelshuffledirect":
__lowerCamelCase = name.replace('''upsample.0.weight''' ,'''upsample.conv.weight''' )
__lowerCamelCase = name.replace('''upsample.0.bias''' ,'''upsample.conv.bias''' )
else:
pass
else:
__lowerCamelCase = '''swin2sr.''' + name
return name
def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : Union[str, Any] ):
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(_UpperCamelCase )
if "qkv" in key:
__lowerCamelCase = key.split('''.''' )
__lowerCamelCase = int(key_split[1] )
__lowerCamelCase = int(key_split[4] )
__lowerCamelCase = config.embed_dim
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val[:dim]
__lowerCamelCase = val[dim : dim * 2]
__lowerCamelCase = val[-dim:]
pass
else:
__lowerCamelCase = val
return orig_state_dict
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : int ,_UpperCamelCase : Any ):
__lowerCamelCase = get_config(_UpperCamelCase )
__lowerCamelCase = SwinaSRForImageSuperResolution(_UpperCamelCase )
model.eval()
__lowerCamelCase = torch.hub.load_state_dict_from_url(_UpperCamelCase ,map_location='''cpu''' )
__lowerCamelCase = convert_state_dict(_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
raise ValueError('''Missing keys when converting: {}'''.format(_UpperCamelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"""Unexpected key {key} in state_dict""" )
# verify values
__lowerCamelCase = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
__lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw ).convert('''RGB''' )
__lowerCamelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__lowerCamelCase = 1_26 if '''Jpeg''' in checkpoint_url else 2_56
__lowerCamelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
__lowerCamelCase = transforms(_UpperCamelCase ).unsqueeze(0 )
if config.num_channels == 1:
__lowerCamelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
__lowerCamelCase = model(_UpperCamelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__lowerCamelCase = torch.Size([1, 3, 5_12, 5_12] )
__lowerCamelCase = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] )
__lowerCamelCase = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] )
__lowerCamelCase = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__lowerCamelCase = torch.Size([1, 3, 5_12, 5_12] )
__lowerCamelCase = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] )
__lowerCamelCase = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] ,_UpperCamelCase ,atol=1e-3 )
print('''Looks ok!''' )
__lowerCamelCase = {
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
'''swin2SR-classical-sr-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
'''swin2SR-classical-sr-x4-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
'''swin2SR-compressed-sr-x4-48'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
'''swin2SR-lightweight-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
'''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
),
}
__lowerCamelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
a_ = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 175 | 0 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( __A : str ):
if len(lowerCAmelCase__ ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
a_ : Dict = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 666 | 0 |
import sys
a_ = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase__ ( _a = N):
SCREAMING_SNAKE_CASE : Optional[Any] = -sys.maxsize - 1
for i in range(len(_a) - 12):
SCREAMING_SNAKE_CASE : List[Any] = 1
for j in range(13):
product *= int(n[i + j])
if product > largest_product:
SCREAMING_SNAKE_CASE : Union[str, Any] = product
return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''') | 25 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowercase ( __snake_case : Dict ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowercase ( ):
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowercase_ : Union[str, Any] = [1, 2, 3]
with pytest.raises(__snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(__snake_case , __snake_case , num_proc=2 )
with pytest.raises(__snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(__snake_case , __snake_case , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def lowercase ( __snake_case : Union[str, Any] ):
lowercase_ : Dict = [1, 2]
lowercase_ : Tuple = {'''a''': 1, '''b''': 2}
lowercase_ : Union[str, Any] = {'''a''': [1, 2], '''b''': [3, 4]}
lowercase_ : Dict = {'''a''': {'''1''': 1}, '''b''': 2}
lowercase_ : int = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowercase_ : Any = [2, 3]
lowercase_ : Optional[int] = {'''a''': 2, '''b''': 3}
lowercase_ : Any = {'''a''': [2, 3], '''b''': [4, 5]}
lowercase_ : str = {'''a''': {'''1''': 2}, '''b''': 3}
lowercase_ : Union[str, Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
| 231 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : str = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : Optional[Any] = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : int = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : str = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = "tabby, tabby cat"
UpperCAmelCase__ : Optional[int] = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ = 3 , lowercase_ = 1 , lowercase_ = 1 , lowercase_ = "relu" , ) -> List[str]:
super().__init__()
__snake_case = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
__snake_case = nn.BatchNormad(lowercase_)
__snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def _a ( self , lowercase_) -> int:
__snake_case = self.convolution(lowercase_)
__snake_case = self.normalization(lowercase_)
__snake_case = self.activation(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Optional[int]:
super().__init__()
__snake_case = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
__snake_case = config.num_channels
def _a ( self , lowercase_) -> Any:
__snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
__snake_case = self.embedder(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ = 2) -> Union[str, Any]:
super().__init__()
__snake_case = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_)
__snake_case = nn.BatchNormad(lowercase_)
def _a ( self , lowercase_) -> Tensor:
__snake_case = self.convolution(lowercase_)
__snake_case = self.normalization(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_) -> List[str]:
super().__init__()
__snake_case = nn.AdaptiveAvgPoolad((1, 1))
__snake_case = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1) , nn.Sigmoid() , )
def _a ( self , lowercase_) -> int:
# b c h w -> b c 1 1
__snake_case = self.pooler(lowercase_)
__snake_case = self.attention(lowercase_)
__snake_case = hidden_state * attention
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1) -> Any:
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = max(1 , out_channels // config.groups_width)
__snake_case = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_) , )
__snake_case = ACTaFN[config.hidden_act]
def _a ( self , lowercase_) -> Optional[int]:
__snake_case = hidden_state
__snake_case = self.layer(lowercase_)
__snake_case = self.shortcut(lowercase_)
hidden_state += residual
__snake_case = self.activation(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1) -> Dict:
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = max(1 , out_channels // config.groups_width)
__snake_case = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_) , )
__snake_case = ACTaFN[config.hidden_act]
def _a ( self , lowercase_) -> Union[str, Any]:
__snake_case = hidden_state
__snake_case = self.layer(lowercase_)
__snake_case = self.shortcut(lowercase_)
hidden_state += residual
__snake_case = self.activation(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 2 , lowercase_ = 2 , ) -> Dict:
super().__init__()
__snake_case = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_) for _ in range(depth - 1)] , )
def _a ( self , lowercase_) -> Union[str, Any]:
__snake_case = self.layers(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Union[str, Any]:
super().__init__()
__snake_case = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
__snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:]):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_))
def _a ( self , lowercase_ , lowercase_ = False , lowercase_ = True) -> BaseModelOutputWithNoAttention:
__snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
__snake_case = stage_module(lowercase_)
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = RegNetConfig
__UpperCAmelCase = '''regnet'''
__UpperCAmelCase = '''pixel_values'''
__UpperCAmelCase = True
def _a ( self , lowercase_) -> Union[str, Any]:
if isinstance(lowercase_ , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu')
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def _a ( self , lowercase_ , lowercase_=False) -> Union[str, Any]:
if isinstance(lowercase_ , lowercase_):
__snake_case = value
UpperCAmelCase__ : List[str] = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Dict = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , lowerCamelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> List[Any]:
super().__init__(lowercase_)
__snake_case = config
__snake_case = RegNetEmbeddings(lowercase_)
__snake_case = RegNetEncoder(lowercase_)
__snake_case = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None) -> BaseModelOutputWithPoolingAndNoAttention:
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.embedder(lowercase_)
__snake_case = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_)
__snake_case = encoder_outputs[0]
__snake_case = self.pooler(lowercase_)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowerCamelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> Any:
super().__init__(lowercase_)
__snake_case = config.num_labels
__snake_case = RegNetModel(lowercase_)
# classification head
__snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> ImageClassifierOutputWithNoAttention:
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_)
__snake_case = outputs.pooler_output if return_dict else outputs[1]
__snake_case = self.classifier(lowercase_)
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = 'single_label_classification'
else:
__snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze())
else:
__snake_case = loss_fct(lowercase_ , lowercase_)
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(lowercase_ , lowercase_)
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states)
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str ) -> str:
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
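# Note: each [old, new] pair is applied with plain str.replace, so the order of
# PATTERNS matters ("memory_attention" must be rewritten before the broader
# "attention" rule fires); a worked example follows the conversion script below.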
def convert_pegasus(tf_weights: dict , cfg_updates: dict ) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str , save_dir: str ) -> None:
    '''simple docstring'''
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 0 |
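Two details of the conversion above are worth isolating. First, the ordered `str.replace` chain over `PATTERNS` does the key renaming. Second, TensorFlow stores dense kernels as `(in_features, out_features)` while `torch.nn.Linear.weight` is `(out_features, in_features)`, which is why `convert_pegasus` transposes values for `dense`/`proj` keys. A small sketch of both (the sample key and shapes are illustrative, not taken from a real checkpoint):

import numpy as np
import torch

# 1) Pattern-based key renaming, using a subset of the PATTERNS table above.
sample_key = "model/encoder/memory_attention/output_proj/kernel"
for old, new in [["memory_attention", "encoder_attn"], ["/", "."], ["output_proj", "out_proj"], ["kernel", "weight"]]:
    sample_key = sample_key.replace(old, new)
print(sample_key)  # model.encoder.encoder_attn.out_proj.weight

# 2) TF kernels are (in, out); torch Linear weights are (out, in), hence v.T.
tf_kernel = np.random.randn(1024, 4096)
linear = torch.nn.Linear(1024, 4096)
assert tf_kernel.T.shape == tuple(linear.weight.shape)
linear.weight.data = torch.tensor(tf_kernel.T, dtype=linear.weight.dtype)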
def pigeonhole_sort(a ):
    '''simple docstring'''
    min_val = min(a )  # min() finds the minimum value
    max_val = max(a )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    '''simple docstring'''
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('Sorted order is:' , ' '.join(str(n ) for n in a ) )
if __name__ == "__main__":
    main()
| 85 |
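Pigeonhole sort runs in O(n + N) time and O(N) extra space, where N = max - min + 1, so it only pays off when the range of values is close to the number of elements. Because every value is offset by the minimum, negative integers work too; a quick check against the in-place function above:

data = [-5, 3, -2, 0, 3]
pigeonhole_sort(data)
print(data)  # [-5, -2, 0, 3, 3]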
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''roberta'''
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 186 | 0 |
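A short usage sketch for the two classes above, assuming they are importable as in `transformers` (the small sizes are arbitrary, and `from_model_config` is the stock `OnnxConfig` entry point):

config = RobertaConfig(vocab_size=1_000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
onnx_config = RobertaOnnxConfig.from_model_config(config)
print(config.hidden_size)        # 64
print(dict(onnx_config.inputs))  # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}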
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = "." ) -> Iterator[str]:
    '''simple docstring'''
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip("./" )
def md_prefix(i ) -> str:
    '''simple docstring'''
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str , new_path: str ) -> str:
    '''simple docstring'''
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i )} {new_part.replace('_' , ' ' ).title()}" )
    return new_path
def print_directory_md(top_dir: str = "." ) -> None:
    '''simple docstring'''
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" " , "%20" )
        filename = os.path.splitext(filename.replace("_" , " " ).title() )[0]
        print(f"{md_prefix(indent )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(".")
| 100 |
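`md_prefix` drives the whole layout: depth 0 starts a new `##` section, and each deeper level becomes a bullet indented by two more spaces. A quick look at its output per depth:

for depth in range(3):
    print(repr(md_prefix(depth)))
# '\n##'
# '  *'
# '    *'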
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        object_detector = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""")
        examples = [
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """candidate_labels""": ["""cat""", """remote""", """couch"""],
            }
        ]
        return object_detector, examples
    def run_pipeline_test( self , object_detector , examples ):
        """simple docstring"""
        outputs = object_detector(examples[0] , threshold=0.0)
        n = len(outputs)
        self.assertGreater(n , 0)
        self.assertEqual(
            outputs , [
                {
                    """score""": ANY(float),
                    """label""": ANY(str),
                    """box""": {"""xmin""": ANY(int), """ymin""": ANY(int), """xmax""": ANY(int), """ymax""": ANY(int)},
                }
                for i in range(n)
            ] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""")
    def test_small_model_tf( self ):
"""simple docstring"""
pass
@require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        object_detector = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""")
        outputs = object_detector(
            """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
[
{"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt( self ):
        """simple docstring"""
        object_detector = pipeline("""zero-shot-object-detection""")
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
[
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
[
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""")
    def test_large_model_tf( self ):
"""simple docstring"""
pass
@require_torch
@slow
    def test_threshold( self ):
        """simple docstring"""
        threshold = 0.2
        object_detector = pipeline("""zero-shot-object-detection""")
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k( self ):
        """simple docstring"""
        top_k = 2
        object_detector = pipeline("""zero-shot-object-detection""")
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
] , )
| 100 | 1 |
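Outside the test harness, the pipeline exercised above is used like this (a minimal sketch; the tiny OWL-ViT checkpoint is the same one the small-model tests load):

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.1,
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])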
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name ) -> MobileNetVaConfig:
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" , model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset") , "r"))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False) -> None:
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 515 |
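The label remapping in `get_mobilenet_va_config` shifts every ImageNet class id up by one so that index 0 can hold the TF checkpoint's extra "background" class. A toy illustration of that shift (real labels come from `imagenet-1k-id2label.json`):

idalabel = {0: "tench", 1: "goldfish"}
idalabel = {int(k) + 1: v for k, v in idalabel.items()}
idalabel[0] = "background"
print(idalabel)  # {1: 'tench', 2: 'goldfish', 0: 'background'}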
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 515 | 1 |
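The `_import_structure` / `_LazyModule` pairing above defers the heavy torch/TF/Flax imports until an attribute is actually accessed. A stripped-down sketch of the underlying idea using `importlib` (an illustration of the mechanism, not the real `_LazyModule` implementation):

import importlib

class LazyAttr:
    """Resolve `module.attribute` on first access instead of at import time."""
    def __init__(self, module_name, attr_name):
        self._module_name, self._attr_name, self._value = module_name, attr_name, None
    def get(self):
        if self._value is None:
            self._value = getattr(importlib.import_module(self._module_name), self._attr_name)
        return self._value

lazy_path = LazyAttr("pathlib", "Path")  # pathlib is not imported yet
print(lazy_path.get()("/tmp"))           # first access triggers the import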
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    mask_patch_size: int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
    mask_ratio: float = field(
        default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__( self ) -> None:
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)} , )
    config_name_or_path: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: Optional[str] = field(default=None , metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    image_size: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        } , )
    patch_size: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None , metadata={"help": "Stride to use for the encoder."} , )
class MaskGenerator:
    def __init__( self , input_size=1_92 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ) -> None:
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('''Input size must be divisible by mask patch size''' )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('''Mask patch size must be divisible by model patch size''' )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
    def __call__( self ):
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
def collate_fn(examples ):
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    mask = torch.stack([example['''mask'''] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mim''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['''train'''].train_test_split(data_args.train_val_split )
        ds['''train'''] = split['''train''']
        ds['''validation'''] = split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , '''decoder_type''' ):
        config.decoder_type = '''simmim'''
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelForMaskedImageModeling.from_config(config )
    if training_args.do_train:
        column_names = ds['''train'''].column_names
    else:
        column_names = ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = '''image'''
    elif "img" in column_names:
        image_column_name = '''img'''
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('''RGB''' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
# create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples ):
        examples['''pixel_values'''] = [transforms(image ) for image in examples[image_column_name]]
        examples['''mask'''] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            ds['''train'''] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            ds['''validation'''] = (
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''masked-image-modeling''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-image-modeling'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 712 |
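A quick sanity check of the `MaskGenerator` defined in the script above: with the defaults (input 192, mask patches of 32, model patches of 4) it produces one entry per 4x4 model patch, i.e. a 48 x 48 grid flattened to 2304 values, with ceil(36 * 0.6) of every 36 mask-patch cells set:

gen = MaskGenerator(input_size=1_92, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = gen()
print(mask.shape)           # torch.Size([2304]), the flattened 48 x 48 grid
print(mask.float().mean())  # ~0.6111 == ceil(36 * 0.6) / 36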
"""simple docstring"""
a_ = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
a_ = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
a_ = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
a_ = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
a_ = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
a_ = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
a_ = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
a_ = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 523 | 0 |
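Schedules like the ones above are descending subsets of the 1000 training timesteps, some of them hand-tuned (note the duplicated boundary pairs such as 800/799). An evenly spaced variant can be generated mechanically; this sketch reproduces the spacing idea, not the exact hand-tuned lists:

import numpy as np

num_train_timesteps, num_inference_steps = 1_000, 27
timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps).round().astype(int)[::-1]
print(timesteps.tolist())  # [999, 961, 922, ..., 38, 0]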
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict( filename ):
    result = {}
    with open(filename , 'r' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('.' )[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.' ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.' ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict( key , value , full_name , weight_type , hf_dict ):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('.' )[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split('.' )[-2]
                mapped_key = mapped_key.replace('*' , layer_index )
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 20 |
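The conversion above reaches nested submodules by walking dotted paths with `getattr`, which is the core idiom inside `set_recursively`. Isolated with a toy module tree:

import torch.nn as nn

def get_by_path(root, dotted):
    # Walk "a.b.c" attribute by attribute, as set_recursively does.
    for attr in dotted.split("."):
        root = getattr(root, attr)
    return root

toy = nn.Sequential()
toy.add_module("proj", nn.Linear(4, 4))
print(get_by_path(toy, "proj.weight").shape)  # torch.Size([4, 4])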
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""Salesforce/codegen-350M-mono""": 20_48,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        if kwargs.pop('add_bos_token' , False ):
            model_id = kwargs.pop('name_or_path' , '' )
            raise ValueError(
                'Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. '
                'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
                F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
                F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
                'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
                ' so that the fast tokenizer works correctly.' )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[str] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__ = False , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Dict = super().decode(
token_ids=snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , **snake_case__ , )
if truncate_before_pattern is not None and len(snake_case__ ) > 0:
_lowerCAmelCase : Optional[int] = self.truncate(snake_case__ , snake_case__ )
return decoded_text
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
def find_re(snake_case__ , snake_case__ , snake_case__ ):
_lowerCAmelCase : List[Any] = pattern.search(snake_case__ , snake_case__ )
return m.start() if m else -1
_lowerCAmelCase : Any = [re.compile(snake_case__ , re.MULTILINE ) for pattern in truncate_before_pattern]
_lowerCAmelCase : Dict = list(re.finditer('^print' , snake_case__ , re.MULTILINE ) )
if len(snake_case__ ) > 1:
_lowerCAmelCase : Optional[int] = completion[: prints[1].start()]
_lowerCAmelCase : Optional[int] = list(re.finditer('^def' , snake_case__ , re.MULTILINE ) )
if len(snake_case__ ) > 1:
_lowerCAmelCase : Optional[Any] = completion[: defs[1].start()]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Tuple = [
pos for pos in [find_re(snake_case__ , snake_case__ , snake_case__ ) for terminal in terminals] if pos != -1
]
if len(snake_case__ ) > 0:
return completion[: min(snake_case__ )]
else:
return completion
| 444 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase ) -> list[int]:
UpperCamelCase : Optional[Any] = [True] * limit
UpperCamelCase : Optional[Any] = False
UpperCamelCase : List[str] = False
UpperCamelCase : Tuple = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
UpperCamelCase : Optional[Any] = i * 2
while index < limit:
UpperCamelCase : int = False
UpperCamelCase : Optional[int] = index + i
UpperCamelCase : Any = [2]
for i in range(3 , _lowerCAmelCase , 2 ):
if is_prime[i]:
primes.append(_lowerCAmelCase )
return primes
def A_ ( _lowerCAmelCase = 100_0000 ) -> int:
UpperCamelCase : Union[str, Any] = prime_sieve(_lowerCAmelCase )
UpperCamelCase : List[str] = 0
UpperCamelCase : Union[str, Any] = 0
for i in range(len(_lowerCAmelCase ) ):
for j in range(i + length , len(_lowerCAmelCase ) ):
UpperCamelCase : Dict = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
UpperCamelCase : int = j - i
UpperCamelCase : Dict = sol
return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
| 711 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
_UpperCAmelCase :Tuple = ['audio_values', 'audio_mask']
def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ):
'''simple docstring'''
super().__init__(
feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
UpperCamelCase : Optional[int] = spectrogram_length
UpperCamelCase : Dict = num_channels
UpperCamelCase : Optional[Any] = patch_size
UpperCamelCase : str = feature_size // self.patch_size[1]
UpperCamelCase : List[str] = n_fft
UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate
UpperCamelCase : Optional[int] = sampling_rate
UpperCamelCase : int = padding_value
UpperCamelCase : str = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = spectrogram(
A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
UpperCamelCase : List[Any] = log_spec[:, :-1]
UpperCamelCase : Optional[int] = log_spec - 20.0
UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCamelCase : str = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A_ ):
UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCamelCase : List[str] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCamelCase : str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa )
# convert into correct format for padding
UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCamelCase : List[str] = padded_audio_features * self.padding_value
for i in range(len(A_ ) ):
UpperCamelCase : Union[str, Any] = audio_features[i]
UpperCamelCase : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
UpperCamelCase : int = {"audio_values": padded_audio_features}
UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ )
return encoded_inputs
| 38 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __a( nn.Module ):
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 0.0
lowerCAmelCase = 1
lowerCAmelCase = 1
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = jnp.floataa
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Union[str, Any] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Tuple = FlaxResnetBlockaD(
in_channels=_SCREAMING_SNAKE_CASE ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = resnets
UpperCAmelCase_ : List[str] = attentions
if self.add_downsample:
UpperCAmelCase_ : Tuple = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = attn(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : Any = self.downsamplers_a(_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class __a( nn.Module ):
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 0.0
lowerCAmelCase = 1
lowerCAmelCase = True
lowerCAmelCase = jnp.floataa
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : Dict = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=_SCREAMING_SNAKE_CASE ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> Any:
UpperCAmelCase_ : str = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : Optional[Any] = self.downsamplers_a(_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class __a( nn.Module ):
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 0.0
lowerCAmelCase = 1
lowerCAmelCase = 1
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = jnp.floataa
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Tuple = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> Dict:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : int = res_hidden_states_tuple[-1]
UpperCAmelCase_ : int = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[str] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = attn(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(_SCREAMING_SNAKE_CASE )
return hidden_states
class __a( nn.Module ):
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 0.0
lowerCAmelCase = 1
lowerCAmelCase = True
lowerCAmelCase = jnp.floataa
def a__ ( self ) -> Any:
UpperCAmelCase_ : List[Any] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Any = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : int = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = resnets
if self.add_upsample:
UpperCAmelCase_ : List[str] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> str:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Optional[Any] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[str] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Optional[Any] = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE )
if self.add_upsample:
UpperCAmelCase_ : Tuple = self.upsamplers_a(_SCREAMING_SNAKE_CASE )
return hidden_states
class __a( nn.Module ):
"""simple docstring"""
lowerCAmelCase = 42
lowerCAmelCase = 0.0
lowerCAmelCase = 1
lowerCAmelCase = 1
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = jnp.floataa
def a__ ( self ) -> Tuple:
# there is always at least one resnet
UpperCAmelCase_ : Any = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : int = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Tuple = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = resnets
UpperCAmelCase_ : Optional[Any] = attentions
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> Any:
UpperCAmelCase_ : int = self.resnets[0](_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[int] = attn(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = resnet(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,deterministic=_SCREAMING_SNAKE_CASE )
return hidden_states | 30 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict:
UpperCAmelCase_ : List[Any] = tokenizer
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
UpperCAmelCase_ : Optional[int] = n_copies
def __iter__( self ) -> Any:
UpperCAmelCase_ : List[Any] = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : str = start_length
UpperCAmelCase_ : Optional[int] = eof_strings
UpperCAmelCase_ : str = tokenizer
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCAmelCase_ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowercase ) ):
with torch.no_grad():
UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1]
UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase )
# each task is generated batch_size times
UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase )
UpperCAmelCase_ : Dict = accelerator.pad_across_processes(
_lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy()
UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowercase , _lowercase ):
gen_token_dict[task].append(_lowercase )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(_lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
code_gens[task].append(remove_last_block(_lowercase ) )
return code_gens
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase )
UpperCAmelCase_ : int = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCAmelCase_ : List[Any] = '''false'''
if args.num_workers is None:
UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCAmelCase_ : int = Accelerator()
set_seed(args.seed , device_specific=_lowercase )
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase_ : Any = tokenizer.eos_token
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase_ : str = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' )
UpperCAmelCase_ : Dict = load_metric('''code_eval''' )
UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
UpperCAmelCase_ : str = args.n_samples // args.batch_size
UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase )
# do not confuse args.batch_size, which is actually the num_return_sequences
UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase )
UpperCAmelCase_ : int = complete_code(
_lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , )
if accelerator.is_main_process:
UpperCAmelCase_ : Any = []
for task in tqdm(range(_lowercase ) ):
UpperCAmelCase_ : int = human_eval['''test'''][task]['''test''']
UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute(
references=_lowercase , predictions=_lowercase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(_lowercase , _lowercase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _UpperCamelCase :
_UpperCamelCase : Optional[Any] = PegasusConfig
_UpperCamelCase : Any = {}
_UpperCamelCase : int = '''gelu'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int]=13 , _SCREAMING_SNAKE_CASE: int=7 , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: Any=False , _SCREAMING_SNAKE_CASE: List[Any]=99 , _SCREAMING_SNAKE_CASE: Tuple=32 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: List[str]=4 , _SCREAMING_SNAKE_CASE: Union[str, Any]=37 , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: List[str]=0.1 , _SCREAMING_SNAKE_CASE: Dict=40 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: Dict=1 , _SCREAMING_SNAKE_CASE: int=0 , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
def lowercase ( self: str ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase_ = prepare_pegasus_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return config, inputs_dict
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = TFPegasusModel(config=_SCREAMING_SNAKE_CASE ).get_decoder()
UpperCamelCase_ = inputs_dict["input_ids"]
UpperCamelCase_ = input_ids[:1, :]
UpperCamelCase_ = inputs_dict["attention_mask"][:1, :]
UpperCamelCase_ = inputs_dict["head_mask"]
UpperCamelCase_ = 1
# first forward pass
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCamelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1e-3 )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> List[str]:
if attention_mask is None:
UpperCamelCase_ = tf.cast(tf.math.not_equal(UpperCamelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCamelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
_UpperCamelCase : Optional[int] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
_UpperCamelCase : List[Any] = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : str = False
_UpperCamelCase : Dict = False
def lowercase ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = TFPegasusModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
_UpperCamelCase : Any = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase : List[str] = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
_UpperCamelCase : List[Any] = '''google/pegasus-xsum'''
@cached_property
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowercase ( self: int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowercase ( self: Tuple , **_SCREAMING_SNAKE_CASE: Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.translate_src_text(**_SCREAMING_SNAKE_CASE )
assert self.expected_text == generated_words
def lowercase ( self: str , **_SCREAMING_SNAKE_CASE: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer(self.src_text , **_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="tf" )
UpperCamelCase_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_SCREAMING_SNAKE_CASE )
return generated_words
@slow
def lowercase ( self: Dict ) -> str:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 715 |
_UpperCAmelCase = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
_UpperCAmelCase = {value: key for key, value in encode_dict.items()}
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
UpperCamelCase_ = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
if set(UpperCamelCase_ ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCamelCase_ = ""
for word in coded.split():
while len(UpperCamelCase_ ) != 0:
decoded += decode_dict[word[:5]]
UpperCamelCase_ = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 371 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
__lowerCAmelCase = 'us-east-1' # defaults region
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
lowerCAmelCase_ = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_00,
"save_steps": 55_00,
}
lowerCAmelCase_ = {**hyperparameters, "max_steps": 10_00}
@property
def lowercase (self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowercase (self ) -> str:
return f"""{self.framework}-transfromers-test"""
@property
def lowercase (self ) -> str:
return f"""./tests/sagemaker/scripts/{self.framework}"""
@property
def lowercase (self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = SageMakerTestEnvironment(framework=request.cls.framework ) | 585 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
_snake_case = b * b - 4 * a * c
_snake_case = (-b + sqrt(_SCREAMING_SNAKE_CASE )) / (2 * a)
_snake_case = (-b - sqrt(_SCREAMING_SNAKE_CASE )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def __SCREAMING_SNAKE_CASE ( ):
_snake_case, _snake_case = quadratic_roots(a=5 , b=6 , c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main() | 585 | 1 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
"""simple docstring"""
import baseaa
def lowercase_ ( _snake_case ):
return baseaa.baaencode(string.encode("""utf-8""" ) )
def lowercase_ ( _snake_case ):
return baseaa.baadecode(_snake_case ).decode("""utf-8""" )
if __name__ == "__main__":
UpperCAmelCase__ : Dict = 'Hello World!'
UpperCAmelCase__ : Tuple = baseaa_encode(test)
print(encoded)
UpperCAmelCase__ : Tuple = baseaa_decode(encoded)
print(decoded)
| 545 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase ( __lowerCamelCase ):
UpperCamelCase_ : "DiagonalGaussianDistribution"
class lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ):
UpperCamelCase_ : Optional[Any] = True
@register_to_config
def __init__( self :Tuple , lowercase :int = 3 , lowercase :int = 3 , lowercase :Tuple[str] = ("DownEncoderBlock2D",) , lowercase :Tuple[str] = ("UpDecoderBlock2D",) , lowercase :Tuple[int] = (6_4,) , lowercase :int = 1 , lowercase :str = "silu" , lowercase :int = 4 , lowercase :int = 3_2 , lowercase :int = 3_2 , lowercase :float = 0.1_82_15 , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
SCREAMING_SNAKE_CASE = Encoder(
in_channels=lowercase , out_channels=lowercase , down_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , double_z=lowercase , )
# pass init params to Decoder
SCREAMING_SNAKE_CASE = Decoder(
in_channels=lowercase , out_channels=lowercase , up_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , norm_num_groups=lowercase , act_fn=lowercase , )
SCREAMING_SNAKE_CASE = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
SCREAMING_SNAKE_CASE = nn.Convad(lowercase , lowercase , 1 )
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
# only relevant if vae tiling is enabled
SCREAMING_SNAKE_CASE = self.config.sample_size
SCREAMING_SNAKE_CASE = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
SCREAMING_SNAKE_CASE = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
SCREAMING_SNAKE_CASE = 0.25
def snake_case__ ( self :Optional[Any] , lowercase :Any , lowercase :Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
if isinstance(lowercase , (Encoder, Decoder) ):
SCREAMING_SNAKE_CASE = value
def snake_case__ ( self :List[str] , lowercase :bool = True ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = use_tiling
def snake_case__ ( self :Union[str, Any] ) -> Dict:
"""simple docstring"""
self.enable_tiling(lowercase )
def snake_case__ ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
def snake_case__ ( self :Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def snake_case__ ( self :Optional[Any] ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
def fn_recursive_add_processors(lowercase :str , lowercase :torch.nn.Module , lowercase :Dict[str, AttentionProcessor] ):
if hasattr(lowercase , '''set_processor''' ):
SCREAMING_SNAKE_CASE = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowercase , lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowercase , lowercase , lowercase )
return processors
def snake_case__ ( self :Any , lowercase :Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = len(self.attn_processors.keys() )
if isinstance(lowercase , lowercase ) and len(lowercase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowercase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowercase :str , lowercase :torch.nn.Module , lowercase :List[str] ):
if hasattr(lowercase , '''set_processor''' ):
if not isinstance(lowercase , lowercase ):
module.set_processor(lowercase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowercase , lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowercase , lowercase , lowercase )
def snake_case__ ( self :int ) -> List[Any]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def snake_case__ ( self :str , lowercase :torch.FloatTensor , lowercase :bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowercase , return_dict=lowercase )
if self.use_slicing and x.shape[0] > 1:
SCREAMING_SNAKE_CASE = [self.encoder(lowercase ) for x_slice in x.split(1 )]
SCREAMING_SNAKE_CASE = torch.cat(lowercase )
else:
SCREAMING_SNAKE_CASE = self.encoder(lowercase )
SCREAMING_SNAKE_CASE = self.quant_conv(lowercase )
SCREAMING_SNAKE_CASE = DiagonalGaussianDistribution(lowercase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowercase )
def snake_case__ ( self :int , lowercase :torch.FloatTensor , lowercase :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowercase , return_dict=lowercase )
SCREAMING_SNAKE_CASE = self.post_quant_conv(lowercase )
SCREAMING_SNAKE_CASE = self.decoder(lowercase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
@apply_forward_hook
def snake_case__ ( self :List[Any] , lowercase :torch.FloatTensor , lowercase :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
SCREAMING_SNAKE_CASE = [self._decode(lowercase ).sample for z_slice in z.split(1 )]
SCREAMING_SNAKE_CASE = torch.cat(lowercase )
else:
SCREAMING_SNAKE_CASE = self._decode(lowercase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowercase )
def snake_case__ ( self :List[Any] , lowercase :List[Any] , lowercase :Optional[Any] , lowercase :int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(a.shape[2] , b.shape[2] , lowercase )
for y in range(lowercase ):
SCREAMING_SNAKE_CASE = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def snake_case__ ( self :Tuple , lowercase :Tuple , lowercase :Tuple , lowercase :str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(a.shape[3] , b.shape[3] , lowercase )
for x in range(lowercase ):
SCREAMING_SNAKE_CASE = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def snake_case__ ( self :Any , lowercase :torch.FloatTensor , lowercase :bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
SCREAMING_SNAKE_CASE = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE = int(self.tile_latent_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
SCREAMING_SNAKE_CASE = []
for i in range(0 , x.shape[2] , lowercase ):
SCREAMING_SNAKE_CASE = []
for j in range(0 , x.shape[3] , lowercase ):
SCREAMING_SNAKE_CASE = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
SCREAMING_SNAKE_CASE = self.encoder(lowercase )
SCREAMING_SNAKE_CASE = self.quant_conv(lowercase )
row.append(lowercase )
rows.append(lowercase )
SCREAMING_SNAKE_CASE = []
for i, row in enumerate(lowercase ):
SCREAMING_SNAKE_CASE = []
for j, tile in enumerate(lowercase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE = self.blend_v(rows[i - 1][j] , lowercase , lowercase )
if j > 0:
SCREAMING_SNAKE_CASE = self.blend_h(row[j - 1] , lowercase , lowercase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowercase , dim=3 ) )
SCREAMING_SNAKE_CASE = torch.cat(lowercase , dim=2 )
SCREAMING_SNAKE_CASE = DiagonalGaussianDistribution(lowercase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowercase )
def snake_case__ ( self :Optional[int] , lowercase :torch.FloatTensor , lowercase :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE = int(self.tile_sample_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
SCREAMING_SNAKE_CASE = []
for i in range(0 , z.shape[2] , lowercase ):
SCREAMING_SNAKE_CASE = []
for j in range(0 , z.shape[3] , lowercase ):
SCREAMING_SNAKE_CASE = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
SCREAMING_SNAKE_CASE = self.post_quant_conv(lowercase )
SCREAMING_SNAKE_CASE = self.decoder(lowercase )
row.append(lowercase )
rows.append(lowercase )
SCREAMING_SNAKE_CASE = []
for i, row in enumerate(lowercase ):
SCREAMING_SNAKE_CASE = []
for j, tile in enumerate(lowercase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE = self.blend_v(rows[i - 1][j] , lowercase , lowercase )
if j > 0:
SCREAMING_SNAKE_CASE = self.blend_h(row[j - 1] , lowercase , lowercase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowercase , dim=3 ) )
SCREAMING_SNAKE_CASE = torch.cat(lowercase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
def snake_case__ ( self :Union[str, Any] , lowercase :torch.FloatTensor , lowercase :bool = False , lowercase :bool = True , lowercase :Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = sample
SCREAMING_SNAKE_CASE = self.encode(lowercase ).latent_dist
if sample_posterior:
SCREAMING_SNAKE_CASE = posterior.sample(generator=lowercase )
else:
SCREAMING_SNAKE_CASE = posterior.mode()
SCREAMING_SNAKE_CASE = self.decode(lowercase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase ) | 201 |
import os
def a ( a = "matrix.txt" ) ->int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(a ) , a ) ) as in_file:
SCREAMING_SNAKE_CASE = in_file.read()
SCREAMING_SNAKE_CASE = [[int(a ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE = len(grid[0] )
SCREAMING_SNAKE_CASE = [[0 for i in range(a )] for j in range(a )]
SCREAMING_SNAKE_CASE = grid[0][0]
for i in range(1 , a ):
SCREAMING_SNAKE_CASE = grid[0][i] + dp[0][i - 1]
for i in range(1 , a ):
SCREAMING_SNAKE_CASE = grid[i][0] + dp[i - 1][0]
for i in range(1 , a ):
for j in range(1 , a ):
SCREAMING_SNAKE_CASE = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''') | 201 | 1 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Dict = 1_6
A : Optional[int] = 3_2
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase = 16 ):
'''simple docstring'''
__lowerCAmelCase = AutoTokenizer.from_pretrained("bert-base-cased" )
__lowerCAmelCase = load_dataset("glue" , "mrpc" )
def tokenize_function(_UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCAmelCase = datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
__lowerCAmelCase = 8
else:
__lowerCAmelCase = None
return tokenizer.pad(
_UpperCamelCase , padding="longest" , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
__lowerCAmelCase = DataLoader(
tokenized_datasets["train"] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
__lowerCAmelCase = DataLoader(
tokenized_datasets["validation"] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : List[Any] = mocked_dataloaders # noqa: F811
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _UpperCamelCase ) == "1":
__lowerCAmelCase = 2
# Initialize accelerator
__lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase = config["lr"]
__lowerCAmelCase = int(config["num_epochs"] )
__lowerCAmelCase = int(config["seed"] )
__lowerCAmelCase = int(config["batch_size"] )
__lowerCAmelCase = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_UpperCamelCase )
def inner_training_loop(_UpperCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase = AdamW(params=model.parameters() , lr=_UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = get_dataloaders(_UpperCamelCase , _UpperCamelCase )
# Instantiate scheduler
__lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowerCAmelCase = model(**_UpperCamelCase )
__lowerCAmelCase = outputs.loss
accelerator.backward(_UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCAmelCase = model(**_UpperCamelCase )
__lowerCAmelCase = outputs.logits.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_UpperCamelCase , references=_UpperCamelCase , )
__lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _UpperCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_UpperCamelCase , default=_UpperCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main()
| 282 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__a , )
assert hasattr(self , "env" )
def snake_case ( self , __a ):
# configuration for running training on smdistributed Model Parallel
__lowerCAmelCase = {
"enabled": True,
"processes_per_host": 8,
}
__lowerCAmelCase = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
__lowerCAmelCase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
__lowerCAmelCase = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=__a , instance_type=self.instance_type , debugger_hook_config=__a , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=__a , py_version="py36" , )
def snake_case ( self , __a ):
TrainingJobAnalytics(__a ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(1,)] )
def snake_case ( self , __a ):
# create estimator
__lowerCAmelCase = self.create_estimator(__a )
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __a )
| 282 | 1 |
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
__UpperCamelCase : int = BeautifulSoup(requests.get(__lowerCAmelCase ).text , """html.parser""" )
__UpperCamelCase : Optional[int] = soup.findAll("""h1""" )
__UpperCamelCase : Optional[int] = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowerCAmelCase , __lowerCAmelCase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 269 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _A ( UpperCAmelCase_ , unittest.TestCase ):
lowercase_ : Union[str, Any] = LDMTextToImagePipeline
lowercase_ : Tuple = TEXT_TO_IMAGE_PARAMS - {
'''negative_prompt''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
'''prompt_embeds''',
}
lowercase_ : Optional[Any] = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
lowercase_ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ : Tuple = False
def a ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__UpperCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
__UpperCamelCase : Any = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCamelCase : List[str] = CLIPTextModel(lowerCamelCase__ )
__UpperCamelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__UpperCamelCase : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vqvae""": vae,
"""bert""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def a ( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : List[str]=0 ):
"""simple docstring"""
if str(lowerCamelCase__ ).startswith("""mps""" ):
__UpperCamelCase : str = torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : Optional[Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def a ( self : Tuple ):
"""simple docstring"""
__UpperCamelCase : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : List[str] = self.get_dummy_components()
__UpperCamelCase : Optional[int] = LDMTextToImagePipeline(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : List[Any] = self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : List[Any] = pipe(**lowerCamelCase__ ).images
__UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__UpperCamelCase : int = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def a ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple=torch.floataa , lowerCamelCase__ : List[Any]=0 ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = torch.manual_seed(lowerCamelCase__ )
__UpperCamelCase : Tuple = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 32, 32) )
__UpperCamelCase : Any = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
__UpperCamelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def a ( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : Any = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Any = self.get_inputs(lowerCamelCase__ )
__UpperCamelCase : Tuple = pipe(**lowerCamelCase__ ).images
__UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_56, 2_56, 3)
__UpperCamelCase : Optional[Any] = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] )
__UpperCamelCase : Union[str, Any] = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
def a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any]=torch.floataa , lowerCamelCase__ : Tuple=0 ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = torch.manual_seed(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 32, 32) )
__UpperCamelCase : List[Any] = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def a ( self : Tuple ):
"""simple docstring"""
__UpperCamelCase : Tuple = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Dict = self.get_inputs(lowerCamelCase__ )
__UpperCamelCase : List[Any] = pipe(**lowerCamelCase__ ).images[0]
__UpperCamelCase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" )
__UpperCamelCase : int = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 269 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=13 , _lowerCAmelCase : str=3 , _lowerCAmelCase : str=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : str=224 , _lowerCAmelCase : Tuple=1000 , _lowerCAmelCase : str=[3, 3, 6, 4] , _lowerCAmelCase : str=[48, 56, 112, 220] , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = num_labels
__lowercase = image_size
__lowercase = layer_depths
__lowercase = embed_dims
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1e-5 , )
def _a ( self : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : int ) -> Any:
"""simple docstring"""
((__lowercase) , (__lowercase) , (__lowercase)) = self.prepare_config_and_inputs()
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__snake_case :List[Any] = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__snake_case :Tuple = False
__snake_case :List[Any] = False
__snake_case :List[str] = False
__snake_case :Tuple = False
__snake_case :int = False
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = SwiftFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
pass
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.hidden_states
__lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
def _config_zero_init(_lowerCAmelCase : Union[str, Any] ):
__lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1e-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
__lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
__lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : int ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase )
# verify the logits
__lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53 | 1 |
'''simple docstring'''
def snake_case_ (UpperCamelCase : List[str] ):
'''simple docstring'''
_a = []
_a = []
_a = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
_a = len(UpperCamelCase ) if (len(UpperCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(UpperCamelCase ) , '''Postfix'''.center(UpperCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(UpperCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(UpperCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(UpperCamelCase ) == 0:
stack.append(UpperCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(UpperCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(UpperCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) , (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(UpperCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) , (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(UpperCamelCase ) # return Postfix as str
def snake_case_ (UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_a = list(infix[::-1] ) # reverse the infix equation
for i in range(len(UpperCamelCase ) ):
if infix[i] == "(":
_a = ''')''' # change "(" to ")"
elif infix[i] == ")":
_a = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(UpperCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
_snake_case : int = input('\nEnter an Infix Equation = ') # Input an Infix equation
_snake_case : List[str] = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''shortest_edge''': 2_56}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any:
"""simple docstring"""
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase_ ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(lowerCAmelCase_ ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 22 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( __lowerCamelCase):
snake_case__ = ['''image_processor''', '''tokenizer''']
snake_case__ = '''AutoImageProcessor'''
snake_case__ = '''AutoTokenizer'''
def __init__( self : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple ) -> str:
super().__init__(a_ , a_ )
_UpperCamelCase = self.image_processor
def __call__( self : Optional[Any] , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : Any ) -> int:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
_UpperCamelCase = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def _UpperCamelCase ( self : Any , *__UpperCamelCase : Dict , **__UpperCamelCase : Dict ) -> Optional[Any]:
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : Any ) -> str:
return self.tokenizer.decode(*a_ , **a_ )
@property
def _UpperCamelCase ( self : str ) -> Union[str, Any]:
return ["input_ids", "attention_mask", "pixel_values"]
| 707 | """simple docstring"""
from typing import Any
import numpy as np
def lowercase ( a__ : np.ndarray ) -> bool:
return np.array_equal(a__ , matrix.conjugate().T )
def lowercase ( a__ : np.ndarray , a__ : np.ndarray ) -> Any:
_UpperCamelCase = v.conjugate().T
_UpperCamelCase = v_star.dot(a__ )
assert isinstance(a__ , np.ndarray )
return (v_star_dot.dot(a__ )) / (v_star.dot(a__ ))
def lowercase ( ) -> None:
_UpperCamelCase = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
_UpperCamelCase = np.array([[1], [2], [3]] )
assert is_hermitian(a__ ), F'''{a} is not hermitian.'''
print(rayleigh_quotient(a__ , a__ ) )
_UpperCamelCase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(a__ ), F'''{a} is not hermitian.'''
assert rayleigh_quotient(a__ , a__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 342 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : str = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
_UpperCAmelCase : List[Any] = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
_UpperCAmelCase : Optional[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Tuple = ['''input_ids''', '''attention_mask''']
__SCREAMING_SNAKE_CASE : Tuple = MBartTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self , snake_case=None , snake_case=None , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=None , snake_case=None , snake_case=None , **snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
super().__init__(
vocab_file=snake_case , tokenizer_file=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , src_lang=snake_case , tgt_lang=snake_case , additional_special_tokens=snake_case , **snake_case , )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
snake_case_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
snake_case_ = {
lang_code: self.convert_tokens_to_ids(snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ = src_lang if src_lang is not None else 'en_XX'
snake_case_ = self.convert_tokens_to_ids(self._src_lang )
snake_case_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
return self._src_lang
@src_lang.setter
def a ( self , snake_case ):
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case , snake_case = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case , snake_case = None ):
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case , snake_case , snake_case , snake_case , **snake_case ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
snake_case_ = src_lang
snake_case_ = self(snake_case , add_special_tokens=snake_case , return_tensors=snake_case , **snake_case )
snake_case_ = self.convert_tokens_to_ids(snake_case )
snake_case_ = tgt_lang_id
return inputs
def a ( self , snake_case , snake_case = "en_XX" , snake_case = None , snake_case = "ro_RO" , **snake_case , ):
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(snake_case , snake_case , **snake_case )
def a ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case ):
snake_case_ = self.convert_tokens_to_ids(snake_case )
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case ):
snake_case_ = self.convert_tokens_to_ids(snake_case )
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case , snake_case = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
snake_case_ = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
| 362 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_UpperCAmelCase : Dict = """sshleifer/bart-tiny-random"""
_UpperCAmelCase : Optional[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowercase ( unittest.TestCase ):
@cached_property
def a ( self ):
return AutoConfig.from_pretrained(snake_case )
def a ( self ):
snake_case_ , *snake_case_ = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def a ( self ):
snake_case_ , *snake_case_ = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
def a ( self ):
snake_case_ , *snake_case_ = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def a ( self ):
snake_case_ , *snake_case_ = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def a ( self ):
with self.assertRaises(snake_case ):
create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 362 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __lowerCAmelCase ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=a_ , )
def __UpperCAmelCase ( self , _a , _a ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def __UpperCAmelCase ( self , _a , _a ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(a_ )
class __lowerCAmelCase ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=a_ , )
def __UpperCAmelCase ( self , _a , _a ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def __UpperCAmelCase ( self , _a , _a ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(a_ )
def lowercase ( ) -> str:
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def lowercase ( ) -> List[Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class __lowerCAmelCase ( __lowerCamelCase ):
'''simple docstring'''
@require_beam
def __UpperCAmelCase ( self ):
__a = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__a = DummyBeamDataset(cache_dir=a_ , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(a_ , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
__a = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , a_ )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , a_ )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(a_ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def __UpperCAmelCase ( self ):
import apache_beam as beam
__a = beam.io.parquetio.WriteToParquet
__a = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__a = DummyBeamDataset(cache_dir=a_ , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
__a = partial(a_ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
a_ , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
a_ , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
__a = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , a_ )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , a_ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(a_ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__a = DummyBeamDataset(cache_dir=a_ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __UpperCAmelCase ( self ):
__a = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__a = NestedBeamDataset(cache_dir=a_ , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(a_ , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
__a = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , a_ )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , a_ )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(a_ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 706 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=_a , )
assert hasattr(self , '''env''' )
def __UpperCAmelCase ( self , _a ):
# configuration for running training on smdistributed Model Parallel
__a = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__a = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__a = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__a = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=_a , py_version='''py36''' , )
def __UpperCAmelCase ( self , _a ):
TrainingJobAnalytics(_a ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __UpperCAmelCase ( self , _a ):
# create estimator
__a = self.create_estimator(_a )
# run training
estimator.fit()
# result dataframe
__a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__a = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _a )
| 65 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Union[str, Any] = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : int = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_UpperCamelCase : List[str] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[Any] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __snake_case ( lowerCAmelCase : int ):
__UpperCAmelCase = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCAmelCase )
return [m.group(0 ) for m in matches]
def __snake_case ( ):
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowerCAmelCase ):
__UpperCAmelCase = None
if _re_tf_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(lowerCAmelCase ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(lowerCAmelCase ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(lowerCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''.join(camel_case_split(lowerCAmelCase )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(lowerCAmelCase )
all_models.sort()
__UpperCAmelCase = {'model_type': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = 'AutoTokenizer'
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(lowerCAmelCase )
def __snake_case ( lowerCAmelCase : Any ):
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
# The type of pipeline may not exist in this framework
if not hasattr(lowerCAmelCase , lowerCAmelCase ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(lowerCAmelCase , lowerCAmelCase ).values():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
model_names.append(lowerCAmelCase )
else:
model_names.extend(list(lowerCAmelCase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __snake_case ( lowerCAmelCase : str , lowerCAmelCase : int ):
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(lowerCAmelCase )
__UpperCAmelCase = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowerCAmelCase )
__UpperCAmelCase = Dataset.from_json(lowerCAmelCase )
__UpperCAmelCase = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowerCAmelCase ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(lowerCAmelCase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowerCAmelCase , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowerCAmelCase , 'pipeline_tags.json' ) )
if commit_sha is not None:
__UpperCAmelCase = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowerCAmelCase , repo_type='dataset' , token=lowerCAmelCase , commit_message=lowerCAmelCase , )
def __snake_case ( ):
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['pt']
if isinstance(lowerCAmelCase , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
__UpperCAmelCase = ', '.join(lowerCAmelCase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Optional[Any] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 396 | '''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCamelCase : Any = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_UpperCamelCase : List[Any] = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_UpperCamelCase : List[str] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowercase( datasets.Metric ):
"""simple docstring"""
def snake_case ( self: int ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' ,id='token' ) ,id='sequence' ) ,id='references' ),
} ) ,)
def snake_case ( self: Dict ,a: List[List[List[str]]] ,a: List[List[str]] ,a: int = 1 ,a: int = 4 ,):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a ,hypotheses=a ,min_len=a ,max_len=a )
}
| 396 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__lowerCAmelCase = 5_0_0_0_0_0
__lowerCAmelCase , __lowerCAmelCase = os.path.split(__file__)
__lowerCAmelCase = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def __lowerCamelCase ( _lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = dataset.map(**_lowerCAmelCase )
@get_duration
def __lowerCamelCase ( _lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = dataset.filter(**_lowerCAmelCase )
def __lowerCamelCase ( ) -> Union[str, Any]:
_UpperCAmelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = datasets.Features({"text": datasets.Value("string" ), "numbers": datasets.Value("float32" )} )
_UpperCAmelCase = generate_example_dataset(
os.path.join(_lowerCAmelCase , "dataset.arrow" ) , _lowerCAmelCase , num_examples=_lowerCAmelCase )
_UpperCAmelCase = transformers.AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=_lowerCAmelCase )
def tokenize(_lowerCAmelCase ):
return tokenizer(examples["text"] )
_UpperCAmelCase = map(_lowerCAmelCase )
_UpperCAmelCase = map(_lowerCAmelCase , batched=_lowerCAmelCase )
_UpperCAmelCase = map(_lowerCAmelCase , function=lambda _lowerCAmelCase : None , batched=_lowerCAmelCase )
with dataset.formatted_as(type="numpy" ):
_UpperCAmelCase = map(_lowerCAmelCase , function=lambda _lowerCAmelCase : None , batched=_lowerCAmelCase )
with dataset.formatted_as(type="pandas" ):
_UpperCAmelCase = map(_lowerCAmelCase , function=lambda _lowerCAmelCase : None , batched=_lowerCAmelCase )
with dataset.formatted_as(type="torch" , columns="numbers" ):
_UpperCAmelCase = map(_lowerCAmelCase , function=lambda _lowerCAmelCase : None , batched=_lowerCAmelCase )
with dataset.formatted_as(type="tensorflow" , columns="numbers" ):
_UpperCAmelCase = map(_lowerCAmelCase , function=lambda _lowerCAmelCase : None , batched=_lowerCAmelCase )
_UpperCAmelCase = map(_lowerCAmelCase , function=_lowerCAmelCase , batched=_lowerCAmelCase )
_UpperCAmelCase = filter(_lowerCAmelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(_lowerCAmelCase , "wb" ) as f:
f.write(json.dumps(_lowerCAmelCase ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 721 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = "▁"
__lowerCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__lowerCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__lowerCAmelCase = {
"facebook/xglm-564M": 2_0_4_8,
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : str="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Tuple="</s>" , __UpperCamelCase : Dict="<s>" , __UpperCamelCase : List[Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : List[str] , ):
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_UpperCAmelCase = 7
_UpperCAmelCase = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
_UpperCAmelCase = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , __UpperCamelCase : str ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_UpperCAmelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase__ ( self : int , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : str ):
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = "".join(__UpperCamelCase ).replace(__UpperCamelCase , " " ).strip()
return out_string
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , "wb" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 129 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =tempfile.mkdtemp()
__UpperCAmelCase =8
# DPR tok
__UpperCAmelCase =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__UpperCAmelCase =os.path.join(self.tmpdirname , '''dpr_tokenizer''')
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase)
__UpperCAmelCase =os.path.join(UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
# BART tok
__UpperCAmelCase =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCAmelCase =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase))))
__UpperCAmelCase =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCAmelCase ={'''unk_token''': '''<unk>'''}
__UpperCAmelCase =os.path.join(self.tmpdirname , '''bart_tokenizer''')
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase)
__UpperCAmelCase =os.path.join(UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCAmelCase =os.path.join(UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(UpperCAmelCase) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(UpperCAmelCase))
def A__ (self):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer'''))
def A__ (self):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer'''))
def A__ (self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =os.path.join(self.tmpdirname , '''rag_tokenizer''')
__UpperCAmelCase =RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
__UpperCAmelCase =RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(UpperCAmelCase)
rag_tokenizer.save_pretrained(UpperCAmelCase)
__UpperCAmelCase =RagTokenizer.from_pretrained(UpperCAmelCase , config=UpperCAmelCase)
self.assertIsInstance(new_rag_tokenizer.question_encoder , UpperCAmelCase)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , UpperCAmelCase)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =RagTokenizer.from_pretrained('''facebook/rag-token-nq''')
__UpperCAmelCase =[
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__UpperCAmelCase =tokenizer(UpperCAmelCase)
self.assertIsNotNone(UpperCAmelCase)
@slow
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''')
__UpperCAmelCase =[
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__UpperCAmelCase =tokenizer(UpperCAmelCase)
self.assertIsNotNone(UpperCAmelCase)
| 132 |
'''simple docstring'''
from math import isqrt
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int ) -> bool:
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2, isqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int = 10**6 ) -> int:
"""simple docstring"""
__a = 0
__a = 1
__a = 7
while prime_candidate < max_prime:
primes_count += is_prime(SCREAMING_SNAKE_CASE__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""") | 448 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = '''▁'''
UpperCamelCase = {'''vocab_file''': '''prophetnet.tokenizer'''}
UpperCamelCase = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
UpperCamelCase = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
UpperCamelCase = {
'''microsoft/xprophetnet-large-wiki100-cased''': 512,
}
def __lowerCamelCase ( snake_case__ ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(snake_case__ ,"""r""" ,encoding="""utf-8""" ) as reader:
_SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE = token.rstrip("""\n""" )
_SCREAMING_SNAKE_CASE = index
return vocab
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : str = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : Tuple = ["input_ids", "attention_mask"]
def __init__( self: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Optional[Any]="[SEP]" , UpperCAmelCase_: Union[str, Any]="[SEP]" , UpperCAmelCase_: int="[SEP]" , UpperCAmelCase_: str="[UNK]" , UpperCAmelCase_: List[str]="[PAD]" , UpperCAmelCase_: Tuple="[CLS]" , UpperCAmelCase_: List[Any]="[MASK]" , UpperCAmelCase_: Optional[Dict[str, Any]] = None , **UpperCAmelCase_: List[Any] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
_SCREAMING_SNAKE_CASE = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(10 ):
_SCREAMING_SNAKE_CASE = F'[unused{i}]'
_SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_ )
def __getstate__( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self: List[Any] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None , UpperCAmelCase_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_ )) + [1]
return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: int ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """""".join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , """ """ ).strip()
return out_string
def UpperCamelCase ( self: Any , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 701 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""google/mt5-small""" )
_SCREAMING_SNAKE_CASE = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
_SCREAMING_SNAKE_CASE = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
_SCREAMING_SNAKE_CASE = shift_tokens_right(UpperCAmelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ ).logits
_SCREAMING_SNAKE_CASE = optax.softmax_cross_entropy(UpperCAmelCase_ , onehot(UpperCAmelCase_ , logits.shape[-1] ) ).mean()
_SCREAMING_SNAKE_CASE = -(labels.shape[-1] * loss.item())
_SCREAMING_SNAKE_CASE = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 569 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
SCREAMING_SNAKE_CASE = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
SCREAMING_SNAKE_CASE = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def a ():
__a = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__a = bs[:]
__a = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowercase )
cs.append(2**8 + n )
n += 1
__a = [chr(__lowercase ) for n in cs]
return dict(zip(__lowercase , __lowercase ) )
def a (lowerCAmelCase__ ):
__a = set()
__a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__a = char
return pairs
class __UpperCAmelCase ( a__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __A , __A , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , **__A , ):
__a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
__a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
__a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
__a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
__a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , )
with open(__a , encoding="""utf-8""" ) as vocab_handle:
__a = json.load(__a )
__a = {v: k for k, v in self.encoder.items()}
__a = errors # how to handle errors in decoding
__a = bytes_to_unicode()
__a = {v: k for k, v in self.byte_encoder.items()}
with open(__a , encoding="""utf-8""" ) as merges_handle:
__a = merges_handle.read().split("""\n""" )[1:-1]
__a = [tuple(merge.split() ) for merge in bpe_merges]
__a = dict(zip(__a , range(len(__a ) ) ) )
__a = {}
__a = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__a = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case_ ( self ):
return len(self.encoder )
def snake_case_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self , __A ):
if token in self.cache:
return self.cache[token]
__a = tuple(__a )
__a = get_pairs(__a )
if not pairs:
return token
while True:
__a = min(__a , key=lambda __A : self.bpe_ranks.get(__a , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__a = bigram
__a = []
__a = 0
while i < len(__a ):
try:
__a = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__a = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__a = tuple(__a )
__a = new_word
if len(__a ) == 1:
break
else:
__a = get_pairs(__a )
__a = """ """.join(__a )
__a = word
return word
def snake_case_ ( self , __A ):
__a = []
for token in re.findall(self.pat , __a ):
__a = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(""" """ ) )
return bpe_tokens
def snake_case_ ( self , __A ):
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def snake_case_ ( self , __A ):
return self.decoder.get(__a )
def snake_case_ ( self , __A ):
__a = """""".join(__a )
__a = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def snake_case_ ( self , __A , __A = None ):
if not os.path.isdir(__a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__a = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + """\n""" )
__a = 0
with open(__a , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
__a = token_index
writer.write(""" """.join(__a ) + """\n""" )
index += 1
return vocab_file, merge_file
def snake_case_ ( self , __A , __A = None , __A = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case_ ( self , __A , __A = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , __A , __A=False , **__A ):
__a = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
__a = """ """ + text
return (text, kwargs)
def snake_case_ ( self , __A , __A = None ):
return token_ids_a + [self.eos_token_id]
def snake_case_ ( self , __A ):
__a = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(__a )
__a = """ """.join(__a )
__a = self.encode(__a )
if len(__a ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 99 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase : Any = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=3_0522, type=int)
lowercase : str = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
lowercase : int = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
lowercase : List[Any] = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowercase : int = [0] * args.vocab_size
for k, v in counter.items():
lowercase : List[Any] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 116 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase__ :
def __init__( self : Dict , snake_case : Optional[int]=None , snake_case : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
A = list(poly_a or [0] )[:]
A = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
A = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
A = len(self.polyB )
# Add 0 to make lengths equal a power of 2
A = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
A = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
A = self.__multiply()
def A_ ( self : int , snake_case : Dict ) -> List[str]:
'''simple docstring'''
A = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(snake_case ) <= 1:
return dft[0]
#
A = self.c_max_length // 2
while next_ncol > 0:
A = [[] for i in range(snake_case )]
A = self.root**next_ncol
# First half of next step
A = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(snake_case ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
A = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(snake_case ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
A = new_dft
A = next_ncol // 2
return dft[0]
def A_ ( self : Optional[int] ) -> str:
'''simple docstring'''
A = self.__dft('A' )
A = self.__dft('B' )
A = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
A = 2
while next_ncol <= self.c_max_length:
A = [[] for i in range(snake_case )]
A = self.root ** (next_ncol // 2)
A = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
A = new_inverse_c
next_ncol *= 2
# Unpack
A = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Optional[int] ) -> Tuple:
'''simple docstring'''
A = 'A = ' + ' + '.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
A = 'B = ' + ' + '.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
A = 'A*B = ' + ' + '.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name) -> MobileNetVaConfig:
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.')
    matches = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$', model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img() -> Image.Image:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size}, size={'shortest_edge': config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing to the hub...')
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
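# A hypothetical direct call to the converter defined above; the checkpoint path
# and output directory are placeholders, not real files.
convert_movilevit_checkpoint(
    model_name='mobilenet_v1_1.0_224',
    checkpoint_path='/path/to/mobilenet_v1_1.0_224.ckpt',
    pytorch_dump_folder_path='./mobilenet_v1_1.0_224-pt',
    push_to_hub=False,
)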
| 109 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    # Stub so the name `Image` still resolves when vision deps are missing
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object, {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }, )
        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')
        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object, {
                        'score': ANY(float),
                        'label': ANY(str),
                        'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                    }, )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
            ], )
        batch_outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ], threshold=0.0, )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4), [
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
            ], )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = 'facebook/detr-resnet-50'
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection', model=model_id)
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection', model=model_id)
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993
        object_detector = pipeline('object-detection', model=model_id, threshold=threshold)
        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
] , )
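# A condensed standalone sketch of the pipeline usage exercised by the slow
# tests above, using the same public model id.
from transformers import pipeline

detector = pipeline('object-detection', model='facebook/detr-resnet-50')
for obj in detector('http://images.cocodataset.org/val2017/000000039769.jpg'):
    print(obj['label'], obj['score'], obj['box'])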
| 13 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
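# A hypothetical direct call mirroring the CLI above; all three paths are
# placeholders for a real TensorFlow checkpoint, its config, and the output file.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path='/path/to/mobilebert/model.ckpt',
    mobilebert_config_file='/path/to/mobilebert/config.json',
    pytorch_dump_path='./mobilebert.bin',
)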
| 121 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a grayscale image computed with the ITU-R 601-2 luma transform."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary mask obtained by thresholding the grayscale image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the dilation of a binary image: a pixel is set when any kernel-covered pixel is set."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
pil_img.save('''result_dilation.png''')
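# Dilation's dual is erosion; this companion sketch (my addition, not part of the
# original file) makes the symmetry explicit: a pixel survives erosion only if
# every kernel-covered pixel is set.
def erosion(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    pad_y, pad_x = kernel.shape[0] // 2, kernel.shape[1] // 2
    image_padded = np.pad(image, ((pad_y, pad_y), (pad_x, pad_x)))
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            window = image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            output[y, x] = int((window[kernel == 1] == 1).all())
    return output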
| 590 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }
    def __init__(self, vocab_size=50_265, d_model=1_024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4_096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
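# Quick sketch of the `attribute_map` indirection defined above: generic code can
# read decoder-specific fields through the common config names.
config = TrOCRConfig(decoder_layers=6, decoder_attention_heads=8)
print(config.num_hidden_layers)    # 6, resolved via attribute_map
print(config.num_attention_heads)  # 8
print(config.hidden_size)          # 1024 (the d_model default)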
| 590 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(self, path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None, split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else """train"""
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(self, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
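# A minimal concrete subclass, only to illustrate the contract; `InMemoryReader`
# is hypothetical and not part of the library.
class InMemoryReader(AbstractDatasetReader):
    def read(self):
        # For this sketch, treat `path_or_paths` as an already-built dict of columns.
        return Dataset.from_dict(self.path_or_paths, features=self.features)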
| 65 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
a : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_63_84,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
'''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None, ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
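# Short usage sketch of the LED-specific padding behavior above: the custom
# `_pad` keeps `global_attention_mask` aligned with `input_ids`, filling with -1.
# The model id matches the pretrained map earlier in this file.
tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
enc = tok('long document text')
enc['global_attention_mask'] = [1] + [0] * (len(enc['input_ids']) - 1)
padded = tok.pad(enc, padding='max_length', max_length=32)
print(len(padded['global_attention_mask']))  # 32, tail filled with -1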
| 679 | 0 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) ,reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] ,)
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 685 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCAmelCase: List[str] ="pytorch_model.bin"
lowerCAmelCase: Tuple ="pytorch_model.bin.index.json"
lowerCAmelCase: List[str] ="adapter_config.json"
lowerCAmelCase: str ="adapter_model.bin"
lowerCAmelCase: str ="adapter_model.safetensors"
lowerCAmelCase: int ="tf_model.h5"
lowerCAmelCase: Optional[int] ="tf_model.h5.index.json"
lowerCAmelCase: str ="model.ckpt"
lowerCAmelCase: Optional[Any] ="flax_model.msgpack"
lowerCAmelCase: int ="flax_model.msgpack.index.json"
lowerCAmelCase: List[Any] ="model.safetensors"
lowerCAmelCase: Optional[int] ="model.safetensors.index.json"
lowerCAmelCase: Tuple ="config.json"
lowerCAmelCase: Any ="preprocessor_config.json"
lowerCAmelCase: Optional[Any] =FEATURE_EXTRACTOR_NAME
lowerCAmelCase: Union[str, Any] ="generation_config.json"
lowerCAmelCase: Union[str, Any] ="modelcard.json"
lowerCAmelCase: Optional[int] ="▁"
lowerCAmelCase: List[Any] =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCAmelCase: Optional[Any] =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCAmelCase: Union[str, Any] =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCAmelCase: Any =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
| 607 |
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("""inputs must be integers.""")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("""inputs must be positive.""")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("""inputs must be exact integers.""")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("""inputs must be less or equal to 2.""")
    # build registers
    qr = qiskit.QuantumRegister(4, """qr""")
    cr = qiskit.ClassicalRegister(2, """cr""")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("""aer_simulator""")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 607 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 294 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 294 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_preds, action_pred, return_preds = model(
                    states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
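# Minimal standalone sketch: build a tiny model with the tester's default
# dimensions above and run one forward pass; shapes follow prepare_config_and_inputs.
config = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=23, max_length=11)
model = DecisionTransformerModel(config).eval()
states = torch.randn(1, 5, config.state_dim)
actions = torch.zeros(1, 5, config.act_dim)
rewards = torch.zeros(1, 5, 1)
returns_to_go = torch.ones(1, 5, 1)
timesteps = torch.arange(5).reshape(1, 5)
out = model(states, actions, rewards, returns_to_go, timesteps)
print(out.action_preds.shape)  # torch.Size([1, 5, 6])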
| 638 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-50''')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-101''')
    else:
        raise ValueError('''Model name should include either resnet50 or resnet101''')
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = '''huggingface/label-files'''
        filename = '''coco-detection-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
F'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
F'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    # we will verify the conversion on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our DETR structure.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
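# A minimal sketch of using a converted checkpoint, assuming here the published
# facebook/detr-resnet-50 weights; the 0.9 score threshold is an arbitrary choice.
import requests
import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# convert logits + boxes into COCO-style detections above the score threshold
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())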
| 638 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
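# A minimal sketch of the pipeline exercised above, run outside the test
# harness; the image URL is the public COCO example matching the local test
# fixture, and top_k=2 mirrors the assertions above.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
    top_k=2,
)
print(preds)  # e.g. [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]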
| 589 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    # build the test-side vocabulary from the eval split
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)
    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
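# A minimal inference sketch for a model fine-tuned by this script, assuming
# the processor and model were saved to ./wav2vec2-common-voice (the real path
# is whatever --output_dir was passed) and that sample.mp3 is a local clip.
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./wav2vec2-common-voice")
model = Wav2Vec2ForCTC.from_pretrained("./wav2vec2-common-voice").eval()

# load the audio and resample to the 16 kHz rate the model was trained on
speech_array, sampling_rate = torchaudio.load("sample.mp3")
speech = torchaudio.transforms.Resample(sampling_rate, 16_000)(speech_array).squeeze().numpy()

inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values).logits
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))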
| 589 | 1 |