code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _A :
"""simple docstring"""
def __init__( self : str , __UpperCAmelCase : Optional[Any] , ):
a : List[str] = parent
a : Optional[Any] = 13
a : Dict = 7
a : Dict = 30
a : Optional[int] = self.seq_length + self.mem_len
a : Optional[int] = 15
a : Dict = True
a : List[str] = True
a : Union[str, Any] = 99
a : Any = [10, 50, 80]
a : Dict = 32
a : Dict = 32
a : Optional[int] = 4
a : List[Any] = 8
a : List[Any] = 128
a : Optional[int] = 2
a : List[Any] = 2
a : Tuple = None
a : str = 1
a : Dict = 0
a : Any = 3
a : Any = self.vocab_size - 1
a : Optional[int] = 0.01
def __snake_case ( self : Union[str, Any]):
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Any = None
if self.use_labels:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Tuple = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __snake_case ( self : List[str]):
random.seed(self.seed)
tf.random.set_seed(self.seed)
def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int):
a : List[str] = TFTransfoXLModel(__UpperCAmelCase)
a , a : List[str] = model(__UpperCAmelCase).to_tuple()
a : Optional[Any] = {"input_ids": input_ids_a, "mems": mems_a}
a , a : Union[str, Any] = model(__UpperCAmelCase).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]):
a : List[Any] = TFTransfoXLLMHeadModel(__UpperCAmelCase)
a , a : Optional[Any] = model(__UpperCAmelCase).to_tuple()
a : Optional[int] = {"input_ids": input_ids_a, "labels": lm_labels}
a , a : Optional[int] = model(__UpperCAmelCase).to_tuple()
a , a : Optional[int] = model([input_ids_a, mems_a]).to_tuple()
a : Tuple = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
a , a : List[str] = model(__UpperCAmelCase).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]):
a : Dict = TFTransfoXLForSequenceClassification(__UpperCAmelCase)
a : int = model(__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self : Union[str, Any]):
a : Optional[Any] = self.prepare_config_and_inputs()
((a) , (a) , (a) , (a)) : List[Any] = config_and_inputs
a : List[str] = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class _A ( _a ,_a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Any = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
UpperCAmelCase : List[Any] = () if is_tf_available() else ()
UpperCAmelCase : Tuple = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
UpperCAmelCase : Any = False
UpperCAmelCase : int = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Any = False
def __snake_case ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int]):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __snake_case ( self : Any):
a : Any = TFTransfoXLModelTester(self)
a : Any = ConfigTester(self , config_class=__UpperCAmelCase , d_embed=37)
def __snake_case ( self : int):
self.config_tester.run_common_tests()
def __snake_case ( self : int):
self.model_tester.set_seed()
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__UpperCAmelCase)
def __snake_case ( self : str):
self.model_tester.set_seed()
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__UpperCAmelCase)
def __snake_case ( self : Optional[int]):
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__UpperCAmelCase)
def __snake_case ( self : Tuple):
a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
a : Optional[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
a : Optional[int] = model_class(__UpperCAmelCase)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class in list_other_models_with_output_ebd:
a : Optional[int] = model.get_output_embeddings()
assert isinstance(__UpperCAmelCase , tf.keras.layers.Layer)
a : List[str] = model.get_bias()
assert name is None
else:
a : List[str] = model.get_output_embeddings()
assert x is None
a : Optional[int] = model.get_bias()
assert name is None
def __snake_case ( self : Optional[Any]):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __snake_case ( self : str):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = TFTransfoXLModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
def __snake_case ( self : Tuple):
pass
@require_tf
class _A ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Skip test until #12651 is resolved.")
@slow
def __snake_case ( self : List[str]):
a : int = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
a : Tuple = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
a : Union[str, Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
a : Optional[int] = model.generate(__UpperCAmelCase , max_length=200 , do_sample=__UpperCAmelCase)
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase)
| 40 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt'}
a_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
a_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =ConvBertTokenizer
def __init__( self : List[str] , a : Union[str, Any]=None , a : Optional[int]=None , a : int=True , a : Tuple="[UNK]" , a : Dict="[SEP]" , a : Dict="[PAD]" , a : List[Any]="[CLS]" , a : Tuple="[MASK]" , a : Dict=True , a : Optional[Any]=None , **a : str , ) -> Dict:
"""simple docstring"""
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , a ) != do_lower_case
or normalizer_state.get("strip_accents" , a ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : List[str] = getattr(a , normalizer_state.pop("type" ) )
SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE : Any = strip_accents
SCREAMING_SNAKE_CASE : Optional[int] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : List[str] = normalizer_class(**a )
SCREAMING_SNAKE_CASE : str = do_lower_case
def __UpperCamelCase ( self : Union[str, Any] , a : List[Any] , a : int=None ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : Tuple , a : str , a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self._tokenizer.model.save(a , name=a )
return tuple(a ) | 76 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_A : Union[str, Any] ='''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_A : Optional[Any] ='''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_A : Optional[int] ='''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[str]:
return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = simple_accuracy(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Any = float(fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> str:
lowerCamelCase__ : int = float(pearsonr(UpperCamelCase , UpperCamelCase )[0] )
lowerCamelCase__ : Tuple = float(spearmanr(UpperCamelCase , UpperCamelCase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
def lowerCamelCase_ ( self: Optional[Any] ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase__ , UpperCamelCase__ )}
elif self.config_name == "stsb":
return pearson_and_spearman(UpperCamelCase__ , UpperCamelCase__ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 41 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase__ ( _a):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_a)
def lowerCamelCase__ ( _a):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE : Union[str, Any] = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(_a , id=_a) | 76 | 0 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def SCREAMING_SNAKE_CASE__ ( __A ) -> Optional[int]:
random.seed(__A )
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# ^^ safe to call this function even if cuda is not available
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 0.9999 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = False , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = 2 / 3 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
_snake_case = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
_snake_case = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_snake_case = True
if kwargs.get('max_value' , lowerCAmelCase_ ) is not None:
_snake_case = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
_snake_case = kwargs['max_value']
if kwargs.get('min_value' , lowerCAmelCase_ ) is not None:
_snake_case = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
_snake_case = kwargs['min_value']
_snake_case = list(lowerCAmelCase_ )
_snake_case = [p.clone().detach() for p in parameters]
if kwargs.get('device' , lowerCAmelCase_ ) is not None:
_snake_case = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
self.to(device=kwargs['device'] )
_snake_case = None
_snake_case = decay
_snake_case = min_decay
_snake_case = update_after_step
_snake_case = use_ema_warmup
_snake_case = inv_gamma
_snake_case = power
_snake_case = 0
_snake_case = None # set in `step()`
_snake_case = model_cls
_snake_case = model_config
@classmethod
def lowerCamelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case = model_cls.load_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ )
_snake_case = model_cls.from_pretrained(lowerCAmelCase_ )
_snake_case = cls(model.parameters() , model_cls=lowerCAmelCase_ , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase_ )
return ema_model
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
_snake_case = self.model_cls.from_config(self.model_config )
_snake_case = self.state_dict()
state_dict.pop('shadow_params' , lowerCAmelCase_ )
model.register_to_config(**lowerCAmelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_snake_case = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_snake_case = (1 + step) / (10 + step)
_snake_case = min(lowerCAmelCase_ , self.decay )
# make sure decay is not smaller than min_decay
_snake_case = max(lowerCAmelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , torch.nn.Module ):
_snake_case = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ , )
_snake_case = parameters.parameters()
_snake_case = list(lowerCAmelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_snake_case = self.get_decay(self.optimization_step )
_snake_case = decay
_snake_case = 1 - decay
_snake_case = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_snake_case = deepspeed.zero.GatheredParameters(lowerCAmelCase_ , modifier_rank=lowerCAmelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = list(lowerCAmelCase_ )
for s_param, param in zip(self.shadow_params , lowerCAmelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = [
p.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) if p.is_floating_point() else p.to(device=lowerCAmelCase_ )
for p in self.shadow_params
]
def lowerCamelCase ( self ):
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_snake_case = None
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = copy.deepcopy(lowerCAmelCase_ )
_snake_case = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
_snake_case = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase_ ):
raise ValueError('Invalid min_decay' )
_snake_case = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase_ ):
raise ValueError('Invalid optimization_step' )
_snake_case = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase_ ):
raise ValueError('Invalid update_after_step' )
_snake_case = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase_ ):
raise ValueError('Invalid use_ema_warmup' )
_snake_case = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
_snake_case = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
_snake_case = state_dict.get('shadow_params' , lowerCAmelCase_ )
if shadow_params is not None:
_snake_case = shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase_ ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(lowerCAmelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
| 42 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , a : int , a : Optional[int]=13 , a : Optional[int]=3 , a : int=224 , a : Optional[int]=30 , a : int=400 , a : Union[str, Any]=True , a : int=None , a : Tuple=True , a : Tuple=[0.5, 0.5, 0.5] , a : Optional[int]=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : str = max_resolution
SCREAMING_SNAKE_CASE : int = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : int = do_normalize
SCREAMING_SNAKE_CASE : Tuple = image_mean
SCREAMING_SNAKE_CASE : Tuple = image_std
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =ViTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = EfficientFormerImageProcessorTester(self )
@property
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , ) | 76 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : Any = DDIMPipeline
a__ : Dict = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a__ : str = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
a__ : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
a__ : List[Any] = False
def UpperCamelCase__ ( self) -> Optional[Any]:
torch.manual_seed(0)
__UpperCamelCase :Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
__UpperCamelCase :Union[str, Any] = DDIMScheduler()
__UpperCamelCase :str = {'''unet''': unet, '''scheduler''': scheduler}
return components
def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[Any]:
if str(__lowercase).startswith('''mps'''):
__UpperCamelCase :Optional[Any] = torch.manual_seed(__lowercase)
else:
__UpperCamelCase :Dict = torch.Generator(device=__lowercase).manual_seed(__lowercase)
__UpperCamelCase :List[str] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :str = '''cpu'''
__UpperCamelCase :Tuple = self.get_dummy_components()
__UpperCamelCase :Optional[Any] = self.pipeline_class(**__lowercase)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Any = self.get_dummy_inputs(__lowercase)
__UpperCamelCase :Optional[Any] = pipe(**__lowercase).images
__UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3))
__UpperCamelCase :Optional[int] = np.array(
[1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4])
__UpperCamelCase :str = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(__lowercase , 1E-3)
def UpperCamelCase__ ( self) -> Union[str, Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :List[str] = '''google/ddpm-cifar10-32'''
__UpperCamelCase :Union[str, Any] = UNetaDModel.from_pretrained(__lowercase)
__UpperCamelCase :str = DDIMScheduler()
__UpperCamelCase :Union[str, Any] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase)
ddim.to(__lowercase)
ddim.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :List[Any] = torch.manual_seed(0)
__UpperCamelCase :Any = ddim(generator=__lowercase , eta=0.0 , output_type='''numpy''').images
__UpperCamelCase :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase :int = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Optional[Any] = '''google/ddpm-ema-bedroom-256'''
__UpperCamelCase :List[str] = UNetaDModel.from_pretrained(__lowercase)
__UpperCamelCase :Dict = DDIMScheduler.from_pretrained(__lowercase)
__UpperCamelCase :Optional[Any] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase)
ddpm.to(__lowercase)
ddpm.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Dict = torch.manual_seed(0)
__UpperCamelCase :str = ddpm(generator=__lowercase , output_type='''numpy''').images
__UpperCamelCase :Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCamelCase :Tuple = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 43 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Any = tokenizer(example["content"] , truncation=_a)["input_ids"]
SCREAMING_SNAKE_CASE : Dict = len(example["content"]) / len(output["input_ids"])
return output
a_ = HfArgumentParser(PretokenizationArguments)
a_ = parser.parse_args()
if args.num_workers is None:
a_ = multiprocessing.cpu_count()
a_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a_ = time.time()
a_ = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a_ = time.time()
a_ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''') | 76 | 0 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_a : Optional[int] = logging.get_logger(__name__)
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = UNetaDModel
_UpperCamelCase : int = "sample"
@property
def __A ( self ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : Optional[int] = 3
_lowerCAmelCase : List[str] = (32, 32)
_lowerCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : Dict = torch.tensor([10] ).to(a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (3, 32, 32)
@property
def __A ( self ):
return (3, 32, 32)
def __A ( self ):
_lowerCAmelCase : str = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_lowerCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = UNetaDModel
_UpperCamelCase : Dict = "sample"
@property
def __A ( self ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : int = 4
_lowerCAmelCase : Optional[int] = (32, 32)
_lowerCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : int = torch.tensor([10] ).to(a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (4, 32, 32)
@property
def __A ( self ):
return (4, 32, 32)
def __A ( self ):
_lowerCAmelCase : Dict = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_lowerCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(a__ )
_lowerCAmelCase : Any = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
model.to(a__ )
_lowerCAmelCase : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def __A ( self ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
_lowerCAmelCase , _lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
model_accelerate.to(a__ )
model_accelerate.eval()
_lowerCAmelCase : int = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase : Optional[int] = noise.to(a__ )
_lowerCAmelCase : Dict = torch.tensor([10] * noise.shape[0] ).to(a__ )
_lowerCAmelCase : Dict = model_accelerate(a__ , a__ )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ , low_cpu_mem_usage=a__ )
model_normal_load.to(a__ )
model_normal_load.eval()
_lowerCAmelCase : Dict = model_normal_load(a__ , a__ )["""sample"""]
assert torch_all_close(a__ , a__ , rtol=1e-3 )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(a__ )
_lowerCAmelCase : List[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase : List[str] = noise.to(a__ )
_lowerCAmelCase : Any = torch.tensor([10] * noise.shape[0] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(a__ , a__ ).sample
_lowerCAmelCase : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_lowerCAmelCase : Optional[int] = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-3 ) )
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = UNetaDModel
_UpperCamelCase : Any = "sample"
@property
def __A ( self , a__=(32, 32) ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : Dict = 3
_lowerCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (3, 32, 32)
@property
def __A ( self ):
return (3, 32, 32)
def __A ( self ):
_lowerCAmelCase : List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_lowerCAmelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
@slow
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(a__ )
_lowerCAmelCase : str = self.dummy_input
_lowerCAmelCase : Optional[Any] = floats_tensor((4, 3) + (256, 256) ).to(a__ )
_lowerCAmelCase : Any = noise
_lowerCAmelCase : str = model(**a__ )
assert image is not None, "Make sure output is not None"
@slow
def __A ( self ):
_lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(a__ )
_lowerCAmelCase : str = 4
_lowerCAmelCase : List[str] = 3
_lowerCAmelCase : Any = (256, 256)
_lowerCAmelCase : List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : str = torch.tensor(batch_size * [1e-4] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , a__ ).sample
_lowerCAmelCase : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowerCAmelCase : int = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-2 ) )
def __A ( self ):
_lowerCAmelCase : int = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(a__ )
_lowerCAmelCase : Union[str, Any] = 4
_lowerCAmelCase : Tuple = 3
_lowerCAmelCase : int = (32, 32)
_lowerCAmelCase : List[str] = torch.ones((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : List[Any] = torch.tensor(batch_size * [1e-4] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : int = model(a__ , a__ ).sample
_lowerCAmelCase : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowerCAmelCase : Optional[Any] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-2 ) )
def __A ( self ):
# not required for this model
pass
| 44 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def lowerCamelCase__ ( _a):
# initialize config
if "resnet-50" in model_name:
SCREAMING_SNAKE_CASE : int = ResNetConfig.from_pretrained("microsoft/resnet-50")
elif "resnet-101" in model_name:
SCREAMING_SNAKE_CASE : int = ResNetConfig.from_pretrained("microsoft/resnet-101")
else:
raise ValueError("Model name should include either resnet50 or resnet101")
SCREAMING_SNAKE_CASE : str = DetrConfig(use_timm_backbone=_a , backbone_config=_a)
# set label attributes
SCREAMING_SNAKE_CASE : List[str] = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE : Union[str, Any] = 250
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = 91
SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
SCREAMING_SNAKE_CASE : Union[str, Any] = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE : Optional[Any] = json.load(open(hf_hub_download(_a , _a , repo_type="dataset") , "r"))
SCREAMING_SNAKE_CASE : int = {int(_a): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = idalabel
SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def lowerCamelCase__ ( _a):
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
))
# 3 convs
for i in range(3):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
])
return rename_keys
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : str = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : int = val
def lowerCamelCase__ ( _a , _a=False):
SCREAMING_SNAKE_CASE : Optional[Any] = ""
if is_panoptic:
SCREAMING_SNAKE_CASE : Optional[int] = "detr."
# first: transformer encoder
for i in range(6):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE : int = in_proj_bias[:256]
SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE : str = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
SCREAMING_SNAKE_CASE : str = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE : Dict = in_proj_bias[:256]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(
f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
SCREAMING_SNAKE_CASE : int = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE : Tuple = in_proj_weight_cross_attn[:256, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias_cross_attn[:256]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
SCREAMING_SNAKE_CASE : Dict = in_proj_bias_cross_attn[256:512]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight_cross_attn[-256:, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias_cross_attn[-256:]
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(_a , stream=_a).raw)
return im
@torch.no_grad()
def lowerCamelCase__ ( _a , _a=None , _a=False):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = get_detr_config(_a)
# load original model from torch hub
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(f"Converting model {model_name}...")
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=_a).eval()
SCREAMING_SNAKE_CASE : Tuple = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_a):
if is_panoptic:
SCREAMING_SNAKE_CASE : List[str] = "detr." + src
rename_key(_a , _a , _a)
# query, key and value matrices need special treatment
read_in_q_k_v(_a , is_panoptic=_a)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE : List[Any] = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr")
and not key.startswith("class_labels_classifier")
and not key.startswith("bbox_predictor")
):
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : Optional[int] = val
elif key.startswith("bbox_attention") or key.startswith("mask_head"):
continue
else:
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : List[Any] = val
else:
if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
SCREAMING_SNAKE_CASE : Any = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : Any = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE : int = DetrForSegmentation(_a) if is_panoptic else DetrForObjectDetection(_a)
model.load_state_dict(_a)
model.eval()
# verify our conversion on an image
SCREAMING_SNAKE_CASE : int = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE : Optional[int] = DetrImageProcessor(format=_a)
SCREAMING_SNAKE_CASE : List[str] = processor(images=prepare_img() , return_tensors="pt")
SCREAMING_SNAKE_CASE : Any = encoding["pixel_values"]
SCREAMING_SNAKE_CASE : Optional[Any] = detr(_a)
SCREAMING_SNAKE_CASE : Any = model(_a)
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3)
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3)
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
Path(_a).mkdir(exist_ok=_a)
model.save_pretrained(_a)
processor.save_pretrained(_a)
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub...")
model.push_to_hub(f"nielsr/{model_name}")
processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
a_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 76 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
    """Configuration holder for PoolFormer image-processor tests.

    Stores the constructor arguments of ``PoolFormerImageProcessor`` and can
    emit them as a kwargs dict via :meth:`prepare_image_processor_dict`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Bug fix: every parameter was named ``_a`` — duplicate argument names
        # are a SyntaxError in Python. Distinct, descriptive names are restored
        # in the same positional order implied by the attribute assignments, so
        # positional callers (e.g. ``...Tester(self)``) are unaffected.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        # ``None`` sentinels replace the original inline defaults (the list
        # defaults were mutable); the resolved values are identical.
        self.size = size if size is not None else {"shortest_edge": 30}
        self.crop_pct = crop_pct
        self.crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the processor configuration as a kwargs dict.

        Renamed from the mangled ``__UpperCAmelCase`` so the visible caller
        (``self.image_processor_tester.prepare_image_processor_dict()``)
        resolves.
        """
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for PoolFormerImageProcessor: attribute presence, dict round-trips
    and the preprocessing call for PIL, numpy and torch inputs.

    BUG FIX: the base class was an undefined placeholder (now the imported
    ImageProcessingSavingTestMixin) and setUp assigned the tester to a local
    instead of ``self.image_processor_tester``, which every test reads.
    """

    # None when vision dependencies are missing; the mixin skips in that case.
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize_and_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'crop_pct'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 30})
        self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30})

        # Integer kwargs must be normalised into the dict form.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
import os
def solution() -> int:
    """Project Euler 22: total of (1-based sorted position) * alphabetical score.

    Reads the comma-separated, quoted names from p022_names.txt next to this
    module.  Each name's score is the sum of its letters' alphabet values
    (A=1 ... Z=26).  Renamed to ``solution`` to match the __main__ caller.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace("\"", "").split(",")
    names.sort()
    total_score = 0
    for i, name in enumerate(names):
        # ord("A") == 65, so subtracting 64 maps A->1, ..., Z->26.
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score
if __name__ == "__main__":
    # Print the Project Euler 22 total name score when run as a script.
    print(solution())
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
SCREAMING_SNAKE_CASE__ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
SCREAMING_SNAKE_CASE__ = logging.WARNING
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = os.getenv("""DATASETS_VERBOSITY""" , SCREAMING_SNAKE_CASE )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option DATASETS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def UpperCAmelCase__ ( ):
'''simple docstring'''
return __name__.split(""".""" )[0]
def UpperCAmelCase__ ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[str] = None ):
'''simple docstring'''
if name is None:
lowerCAmelCase = _get_library_name()
return logging.getLogger(SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( ):
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = False
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm that swallows every call (used when progress bars are disabled).

    Name restored from the caller in _tqdm_cls.__call__; BUG FIX: the iterable
    was bound to a local instead of ``self._iterator``, so ``__iter__`` raised
    AttributeError.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep only the iterable (first positional argument), if any.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return a no-op function for any tqdm method (update, set_description, ...)."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
# Global switch for progress bars; toggled by enable/disable_progress_bar below.
_tqdm_active = True


class _tqdm_cls:
    """Factory returning a real tqdm bar when active, otherwise an EmptyTqdm.

    Class name restored from the instantiation on the next line, which
    referenced ``_tqdm_cls`` while the class itself carried a placeholder name.
    """

    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar() -> None:
    """Enable tqdm progress bars globally.

    BUG FIX: the original body bound a local placeholder instead of assigning
    the module-level flag referenced elsewhere.
    """
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar() -> None:
    """Disable tqdm progress bars globally."""
    global _tqdm_active
    _tqdm_active = False
| 46 |
from collections.abc import Callable
import numpy as np
def lowerCamelCase__(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float):
    """Explicit (forward) Euler method.

    Integrates y' = ode_func(x, y) from x0 to x_end with the given step size,
    starting from y(x0) = y0.  Returns the numpy array of y values (length n+1).

    BUG FIX: the original signature repeated one placeholder name for all five
    parameters (a SyntaxError) and the array writes lost their subscripts;
    ``y[0] = y0`` and ``y[k + 1] = ...`` are restored.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
'''Configuration for the generated documentation notebooks (Italian docs).'''

# First code cell injected into every generated notebook.
# BUG FIX: the list below references INSTALL_CONTENT, but the string was
# assigned to a placeholder name; the real name is restored.
INSTALL_CONTENT = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Doc-builder placeholders that the code formatter must leave untouched.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR: return 1 if at least one input is 1, else 0.

    BUG FIX: both parameters shared one placeholder name (a SyntaxError) and
    the tuple repeated a single undefined name; names restored from the
    call sites below.
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
    # Demonstrate the OR gate on all four input pairs.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer whose evaluate/predict post-process raw model outputs before metrics.

    NOTE(review): every assignment in the original was bound to a placeholder
    name while the code below referenced the real names (``output``,
    ``metrics``, ``start_time``, ``self.compute_metrics``, ...); the real
    targets are restored here.  The base class placeholder is replaced with the
    imported ``Trainer``.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-featurized) examples and the function mapping raw predictions
        # to final answers; both are consumed by evaluate/predict below.
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        """Run the evaluation loop, post-process predictions and compute metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the metric function, even if the loop raised.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        """Run the prediction loop; return a PredictionOutput with post-processed answers."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 48 |
# Universal gas constant R in J/(mol*K).  BUG FIX: the function below
# references UNIVERSAL_GAS_CONSTANT, but the constant carried a placeholder name.
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed sqrt(3RT/M) of an ideal-gas molecule.

    temperature is in kelvin, molar_mass in kg/mol.  Raises ValueError (a
    subclass of the bare Exception previously raised) on non-physical inputs.
    Name restored from the __main__ caller below.
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen gas (molar mass 28) at 300 K.
    # BUG FIX: the three values were assigned to one placeholder name while the
    # call and the f-string referenced temperature/molar_mass/vrms.
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) each sequence to ``sequence_length`` with ``padding_value``.

    A tuple ``padding_value`` signals two-element entries (e.g. entity spans), so
    the output gains a trailing dimension of size 2.  ``padding_side`` is
    "right" or "left".  Returns a plain nested Python list.

    BUG FIX: the original signature repeated one placeholder for all four
    parameters (a SyntaxError) and the per-row writes lost their left-hand
    subscripts; name restored from the collator call sites below.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    """Return True if ``char`` is punctuation.

    Treats the ASCII symbol ranges (e.g. "^", "$", "`") as punctuation even
    though they are not in the Unicode ``P*`` categories, then falls back to
    ``unicodedata.category``.  Renamed from a duplicate placeholder name that
    clobbered the previous function.
    """
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P'):
        return True
    return False
@dataclass
class _A(DataCollatorMixin):
    """Data collator for LUKE token classification.

    Pads the token inputs via the tokenizer, then pads labels, ner_tags and
    original_entity_spans to the padded entity length.

    BUG FIX: the six dataclass fields all shared one placeholder name (so only
    one survived), the padded lists were never stored back into ``batch``, and
    the final conversion used the nonexistent ``torch.intaa`` (now
    ``torch.int64``).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        # NOTE(review): method renamed to torch_call so DataCollatorMixin can
        # dispatch here for return_tensors="pt" — confirm against transformers.
        import torch

        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors happens at the end once labels are padded.
            return_tensors='pt' if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['entity_ids']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature['ner_tags'] for feature in features]
        batch['ner_tags'] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature['original_entity_spans'] for feature in features]
        batch['original_entity_spans'] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 49 |
# Small demo adjacency list used by the BFS functions in the __main__ block.
# BUG FIX: the __main__ block references `demo_graph`, but the dict was bound
# to a placeholder name.
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return one shortest path from start to goal as a list of nodes.

    Breadth-first search over ``graph`` (adjacency-list dict).  Returns
    ``[start]`` when start == goal and ``[]`` when goal is unreachable.
    Name and argument bindings restored from the __main__ caller; the original
    signature repeated one placeholder for all three parameters (a SyntaxError).
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest start->target path, or -1.

    -1 is returned for an empty graph, unknown endpoints, or an unreachable
    target.  Name restored from the __main__ caller.  BUG FIX: ``visited`` is
    now ``{start}`` (the original ``set(start)`` would split a multi-character
    node name into single characters), and the distance updates lost their
    left-hand subscripts.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # Demo run on the module-level example graph.
    print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler 20).

    Name restored from the __main__ caller; BUG FIX: the mapped function was a
    placeholder where ``int`` is required.
    """
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
    # Read an integer from stdin and print the digit sum of its factorial.
    print(solution(int(input("""Enter the Number: """).strip())))
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    """Slow integration test: mT5-small loss against a reference value.

    BUG FIX: the original body passed an undefined name ``a`` to the model; the
    tensors are now bound to real names and threaded through the call.
    """

    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video URL via the downloadgram API and return the raw bytes.

    Name restored from the __main__ caller.  BUG FIX: ``base_url`` was assigned
    to a placeholder while being referenced, and the final request fetched the
    original page URL instead of the resolved ``src`` link.
    """
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url).content
if __name__ == "__main__":
    # BUG FIX: `url` and `file_name` were assigned to a placeholder while the
    # code below referenced the real names.
    url = input("Enter Video/IGTV url: ").strip()
    # Timestamped output file name, e.g. 2024-01-01_12:00:00.mp4
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly ``successes`` hits in ``trials`` Bernoulli trials.

    ``prob`` is the per-trial success probability, strictly between 0 and 1.
    Raises ValueError on invalid input.  Name restored from the __main__
    caller; the original signature repeated one placeholder for all three
    parameters (a SyntaxError) and the isinstance checks lost their arguments.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    # Example: P(X = 2) for Binomial(n=4, p=0.75).
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowerCamelCase : Any = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments describing the GLUE task data fed to the model.

    NOTE(review): the four fields all shared one placeholder name (so only the
    last survived); the real field names are restored from how GlueDataset
    reads them (``args.task_name``, ``args.data_dir``, ``args.max_seq_length``,
    ``args.overwrite_cache``).
    """

    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )

    def __post_init__(self):
        # Normalise the task name once so lookups into glue_processors succeed.
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split names accepted by GlueDataset (also via Split[str]).

    BUG FIX: the base class was an undefined placeholder (Enum is imported at
    the top of the file) and the three members shared one attribute name.
    """

    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    """GLUE task dataset with on-disk feature caching (deprecated in favour of
    🤗 Datasets, kept for backward compatibility).

    NOTE(review): every assignment target was an obfuscated placeholder while
    the code referenced the real names (``mode``, ``examples``, ``start``,
    ``label_list``, ``self.features`` via __len__/__getitem__, ...); the real
    targets are restored here.  The undefined base class placeholder is
    replaced with the imported torch Dataset.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args,
        tokenizer,
        limit_length=None,
        mode=Split.train,
        cache_dir=None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        """Return the (possibly reordered) label list for the task."""
        return self.label_list
| 52 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """Fast custom tokenizer; its matching slow implementation is CustomTokenizer.

    BUG FIX: the base class was an undefined placeholder (BertTokenizerFast is
    imported above) and the slow-tokenizer hook lost its attribute name.
    NOTE(review): `slow_tokenizer_class` restored per the fast-tokenizer
    convention — confirm against the custom tokenization test fixtures.
    """

    slow_tokenizer_class = CustomTokenizer
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor, a language tokenizer and a Q-Former
    tokenizer into a single InstructBLIP processor.

    NOTE(review): the original __init__/__call__ signatures repeated one
    placeholder for several parameters (a SyntaxError) and the Q-Former
    encodings were never stored into the returned BatchFeature; real names and
    the ``qformer_*`` keys are restored here.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize text with both tokenizers and preprocess images, merged
        into one BatchFeature."""
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # Store the Q-Former stream under distinct keys so it does not
            # clash with the main tokenizer output.
            encoding["qformer_input_ids"] = qformer_text_encoding.pop('input_ids')
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop('attention_mask')
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the language tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the language tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save the processor, including the Q-Former tokenizer in a subfolder."""
        if os.path.isfile(save_directory):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the processor, restoring the Q-Former tokenizer from its subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
# Module-level logger used by the retriever classes below as ``logger``.
logger = logging.getLogger(__name__)
class RayRetriever:
    """Ray actor that hosts a ``RagRetriever`` for distributed retrieval.

    The retriever is built lazily via :meth:`create_rag_retriever`; the method
    names match the remote calls issued by ``RagRayDistributedRetriever``.
    """

    def __init__(self):
        # Becomes True once create_rag_retriever() has built the retriever.
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the RagRetriever once; repeated calls are no-ops."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load/initialize the underlying index inside this actor."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Return ``(doc_ids, retrieved_doc_embeds)`` for the given queries."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    """``RagRetriever`` that delegates retrieval to a pool of Ray actors.

    With an empty worker list it degrades to plain in-process retrieval.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Build a retriever inside every worker actor.
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        """Initialize retrieval on the workers, or locally when not distributed."""
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve documents, dispatching to a random worker when distributed."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build a distributed retriever; *actor_handles* are RayRetriever actors."""
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : List[Any] = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 54 |
from typing import Any
class Node:
    """Singly-linked-list node holding arbitrary data."""

    def __init__(self, data):
        """Store *data*; a new node starts detached (``next`` is None)."""
        self.data = data
        self.next = None

    def __repr__(self):
        """Debug representation, e.g. ``Node(5)``."""
        return f"Node({self.data})"
class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion and reversal."""

    def __init__(self):
        self.head = None  # first Node, or None for an empty list

    def __iter__(self):
        """Yield each node's data, head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        """Number of nodes (O(n) — the list keeps no size counter)."""
        return sum(1 for _ in self)

    def __repr__(self):
        """Arrow-joined data, e.g. ``1->2->3``; empty string for an empty list."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        """Return the data stored at *index*; raise ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        """Replace the data stored at *index*; raise ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        """Append *data* at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        """Prepend *data* at the start of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        """Insert *data* before position *index* (0 <= index <= len)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        """Remove and return the element at *index*; raise IndexError when invalid."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        """Reverse the list in place."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    """Exercise LinkedList with integer data (raises AssertionError on failure)."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2():
    """Exercise LinkedList with heterogeneous data types."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for item in test_input:
        linked_list.insert_tail(item)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    """Interactive demo: build, mutate, reverse and print a LinkedList."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
'''simple docstring'''
import os
def solution(filename: str = "matrix.txt") -> int:
    """Project Euler 81: minimal path sum from the top-left to the bottom-right
    of the square matrix stored in *filename*, moving only right and down.
    """
    # Resolve relative to the directory component of *filename*
    # (absolute paths pass through os.path.join unchanged).
    with open(os.path.join(os.path.dirname(filename), filename)) as in_file:
        data = in_file.read()

    # Parse comma-separated integer rows.
    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    # dp[i][j] = minimal path sum reaching cell (i, j).
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]

    # First row and first column each admit exactly one path.
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
# Module-level logger used by the verification helpers below as ``logger``.
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """How thoroughly downloaded/generated data should be verified."""

    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'
class ChecksumVerificationException(Exception):
    """Base error raised while verifying checksums of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum did not match the expected one."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded checksums to expected ones, raising on any mismatch.

    Args:
        expected_checksums: mapping url -> checksum, or None to skip verification.
        recorded_checksums: mapping url -> checksum actually observed.
        verification_name: optional label used in log/error messages.

    Raises:
        ExpectedMoreDownloadedFiles: expected urls missing from the record.
        UnexpectedDownloadedFile: recorded urls not among the expected ones.
        NonMatchingChecksumError: at least one checksum differs.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error raised while verifying dataset splits."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """At least one split's size does not match the expected size."""
def verify_splits(expected_splits, recorded_splits):
    """Check recorded split sizes against expected split infos.

    Raises:
        ExpectedMoreSplits / UnexpectedSplits: split-name mismatch.
        NonMatchingSplitsSizesError: num_examples differ for some split.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for the file at *path*.

    The checksum is a SHA-256 hexdigest computed in 1 MiB chunks, or None
    when *record_checksum* is False.
    """
    from hashlib import sha256  # local import: keeps the helper self-contained

    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """True iff *dataset_size* is truthy and below ``config.IN_MEMORY_MAX_SIZE``."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def __magic_name__(num: float) -> float:
    """Gamma function Γ(num), computed by numerically integrating
    ``x**(num-1) * exp(-x)`` from 0 to infinity.

    Raises:
        ValueError: if ``num`` is not strictly positive.
    """
    if num <= 0:
        raise ValueError('''math domain error''')
    # quad returns (value, abs_error); keep the value only.
    return quad(_integrand, 0, inf, args=(num))[0]


def _integrand(x: float, z: float) -> float:
    """Integrand of the gamma function: x^(z-1) * e^(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into HF format.

    Writes the weights, configuration JSON and vocabulary JSON into
    *pytorch_dump_folder_path*.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued entries: only plain-JSON-serializable params are kept.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE continuation markers "@@" are stripped; full words get "</w>".
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path) | 76 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """Builds a tiny Pegasus config and matching inputs for the TF tests."""

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small (config, inputs_dict) pair; every row ends with EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached decoding (past_key_values) matches full re-decoding."""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a Pegasus forward pass."""
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attended; the rest mask out padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-test harness for the TF Pegasus model classes."""

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFPegasusForConditionalGeneration,
            """feature-extraction""": TFPegasusModel,
            """summarization""": TFPegasusForConditionalGeneration,
            """text2text-generation""": TFPegasusForConditionalGeneration,
            """translation""": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    """Slow end-to-end generation checks against google/pegasus-xsum."""

    src_text = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
    ]
    expected_text = [
        """California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
        """ reduce the risk of wildfires.""",
        """N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = """google/pegasus-xsum"""

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize src_text, beam-search generate, and decode the outputs."""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
def manhattan_distance(point_a, point_b):
    """Return the Manhattan (L1) distance between two equal-length points.

    Raises:
        TypeError / ValueError: from _validate_point on malformed input.
        ValueError: when the points have different dimensionality.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point(point):
    """Raise unless *point* is a non-empty list of ints/floats.

    Raises:
        ValueError: for a falsy (missing/empty) input.
        TypeError: for a non-list input or a non-numeric element.
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a, point_b):
    """Single-expression variant of manhattan_distance (same contract)."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 76 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """yaml.SafeLoader that rejects mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; fold them into tuples before counting.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}')

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into (yaml_block_or_None, rest_of_content).

    A YAML front-matter block is delimited by a leading ``---`` line and a
    second ``---`` line; when absent, the first element is None.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""") + 1
        yamlblock = """\n""".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    """Dict of dataset metadata read from / written to a README's YAML block."""

    # class attributes
    _FIELDS_WITH_DASHES = {'''train_eval_index'''}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path) -> "DatasetMetadata":
        """Load metadata from the YAML front-matter of the README at *path*."""
        with open(path, encoding="""utf-8""") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write this metadata back into the README at *path* (created if absent)."""
        if path.exists():
            with open(path, encoding="""utf-8""") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, """w""", encoding="""utf-8""") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content=None) -> str:
        """Return README text with this metadata as the YAML front-matter."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string (rejecting duplicate keys) into metadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("""-""", """_""") if key.replace("""-""", """_""") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, restoring dashed key spellings where required."""
        return yaml.safe_dump(
            {
                (key.replace("""_""", """-""") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="""utf-8""",
        ).decode("""utf-8""")
# Known task categories for dataset cards: each category maps to the
# (currently empty) list of suggested task ids used when validating the
# README's YAML metadata block.
lowercase_ = {
    """image-classification""": [],
    """translation""": [],
    """image-segmentation""": [],
    """fill-mask""": [],
    """automatic-speech-recognition""": [],
    """token-classification""": [],
    """sentence-similarity""": [],
    """audio-classification""": [],
    """question-answering""": [],
    """summarization""": [],
    """zero-shot-classification""": [],
    """table-to-text""": [],
    """feature-extraction""": [],
    """other""": [],
    """multiple-choice""": [],
    """text-classification""": [],
    """text-to-image""": [],
    """text2text-generation""": [],
    """zero-shot-image-classification""": [],
    """tabular-classification""": [],
    """tabular-regression""": [],
    """image-to-image""": [],
    """tabular-to-text""": [],
    """unconditional-image-generation""": [],
    """text-retrieval""": [],
    """text-to-speech""": [],
    """object-detection""": [],
    """audio-to-audio""": [],
    """text-generation""": [],
    """conversational""": [],
    """table-question-answering""": [],
    """visual-question-answering""": [],
    """image-to-text""": [],
    """reinforcement-learning""": [],
    """voice-activity-detection""": [],
    """time-series-forecasting""": [],
    """document-question-answering""": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    # BUG FIX: the parser/args/path results were all bound to one reused
    # name (``lowercase_``) while the lines below read ``ap`` / ``args`` /
    # ``readme_filepath`` / ``dataset_metadata``; bind the names actually
    # used.
    ap = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
    ap.add_argument("""readme_filepath""")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 58 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
a_ = logging.get_logger(__name__)
# NOTE(review): this rebinding of ``a_`` shadows the logger above — the two
# constants were presumably meant to carry distinct names (logger vs. the
# pretrained config archive map); confirm the intended identifiers.
a_ = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCamelCase ( __A ):
    '''Configuration class for a ViT-MSN model.

    BUG FIX: the scrambled ``__init__`` repeated the parameter name ``a``
    for every argument (a SyntaxError) and bound each value to a throwaway
    local instead of an instance attribute, so the config stored nothing.
    Parameter names are restored from the upstream ``ViTMSNConfig`` API and
    all defaults are preserved, so existing callers are unaffected.
    '''

    lowerCamelCase__ ='vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        """Store the transformer/patch hyper-parameters on the config."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Unit tests for ``BlipaProcessor`` (tokenizer + image-processor wrapper).

    BUG FIX: every method previously shared one scrambled name (so later
    definitions shadowed earlier ones), fixtures were bound to throwaway
    locals instead of ``self``, and several arguments were undefined
    names.  Method names are restored from the internal references
    (``self.get_tokenizer()``, ``self.prepare_image_inputs()`` …) and the
    standard unittest protocol (``setUp``/``tearDown``).
    """

    def setUp(self):
        # Create a temp dir with a saved processor that all tests reuse.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list holding one random PIL image."""
        # BUG FIX: ``np.uinta`` does not exist (scrambled ``uint8``) and the
        # comprehension converted an undefined name instead of ``x``.
        image_inputs = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 59 |
import baseaa
def lowerCamelCase__ ( _a):
    """Encode the string ``_a`` to bytes via ``baseaa.aaaencode``.

    BUG FIX: the body encoded an undefined name ``string``; use the actual
    parameter.  NOTE(review): the decode function below rebinds the same
    scrambled name and shadows this one — confirm the intended distinct
    names.
    """
    return baseaa.aaaencode(_a.encode("utf-8"))
def lowerCamelCase__ ( _a):
    """Decode bytes produced by the encoder above back into a UTF-8 string."""
    decoded_bytes = baseaa.aaadecode(_a)
    return decoded_bytes.decode("utf-8")
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
# Module-level logger for this metric.
logger = datasets.logging.get_logger(__name__)

# BUG FIX: the three string constants below were all bound to the same
# scrambled name ``snake_case__`` (each shadowing the previous) while the
# metric class reads ``_CITATION`` / ``_DESCRIPTION`` /
# ``_KWARGS_DESCRIPTION``; bind the names that are actually referenced.
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''

_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_( datasets.Metric ):
    """COMET machine-translation metric wrapper.

    BUG FIX: the three hook methods previously shared one scrambled name
    (so later definitions shadowed earlier ones) and ``_compute``'s
    signature repeated a parameter name five times (a SyntaxError); the
    ``datasets.Metric`` hook names (``_info`` / ``_download_and_prepare``
    / ``_compute``) are restored.
    """

    def _info( self ):
        """Declare the metric's citation, input features and references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''' , id='''sequence''' ),
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ] , )

    def _download_and_prepare( self , dl_manager ):
        """Load the COMET checkpoint selected by ``self.config_name``.

        BUG FIX: the checkpoint was bound to a throwaway local while
        ``_compute`` reads ``self.scorer``.
        """
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        """Score each (source, hypothesis, reference) triple with COMET."""
        if gpus is None:
            # Use one GPU when available, otherwise run on CPU.
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        # Re-shape the column dict into one row dict per example.
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 60 |
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are exempt from the stale sweep below.
# BUG FIX: the list was bound to the scrambled name ``a_`` while the sweep
# function reads ``LABELS_TO_EXEMPT``; bind the name actually referenced.
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]
def main ( ):
    """Close or stale-mark inactive issues on huggingface/transformers.

    Requires a ``GITHUB_TOKEN`` environment variable.  Issues carrying a
    label from ``LABELS_TO_EXEMPT`` are skipped.

    BUG FIX: the function was bound to a placeholder name while the
    ``__main__`` block calls ``main()``; intermediate results were bound
    to throwaway locals while the body read ``g``/``repo``/``open_issues``
    /``comments``/``last_comment``; and the sort key used an undefined
    ``i`` with ``reverse`` set to an undefined ``_a``.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda c: c.created_at , reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
if __name__ == "__main__":
    # Entry point for the scheduled stale-issue sweep defined above.
    main()
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
_a = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_a = logging.WARNING
def _get_default_logging_level ( ):
    """Return the default level, honouring the DATASETS_VERBOSITY env var.

    BUG FIX: restores the distinct name referenced by the configuration
    function below (every function here was scrambled to ``__a``), binds
    the env value to the name the body reads, and replaces an undefined
    default argument (``os.getenv`` already returns ``None`` when unset).
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
    return _default_log_level
def __a ( ):
return __name__.split("." )[0]
def _get_library_root_logger ( ):
    """Return the logger at the root of the library's namespace.

    BUG FIX: restores the distinct name referenced by the configuration
    and verbosity helpers below.
    """
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger ( ):
    # Apply our default configuration to the library root logger.
    # BUG FIX: restores the name referenced at the module-level call site,
    # and binds the logger to the name the next line reads (it was
    # previously bound to a throwaway local).
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger ( ):
    """Reset the library root logger back to NOTSET.

    BUG FIX: the logger was bound to a throwaway local while the next line
    read ``library_root_logger``.  NOTE(review): the restored function
    name follows the upstream `datasets` logging module — confirm.
    """
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger ( __lowerCamelCase = None ):
    """Return a logger named ``__lowerCamelCase`` (library logger when None).

    BUG FIX: the body tested an undefined ``name`` and discarded the
    resolved default into a throwaway local; use the parameter that is
    actually passed.  NOTE(review): the restored public name follows the
    upstream `datasets` logging module — confirm.
    """
    if __lowerCamelCase is None:
        __lowerCamelCase = _get_library_name()
    return logging.getLogger(__lowerCamelCase )
def get_verbosity ( ):
    """Return the effective logging level of the library root logger.

    BUG FIX: restores a distinct public name (scrambled to ``__a``),
    following the upstream `datasets` logging module.
    """
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity ( __lowerCamelCase ):
    """Set the library root logger to level ``__lowerCamelCase``.

    BUG FIX: restores the name referenced by the ``set_verbosity_*``
    helpers below (scrambled to ``__a``).
    """
    _get_library_root_logger().setLevel(__lowerCamelCase )
def set_verbosity_info ( ):
    """Set the verbosity to INFO.

    BUG FIX: the four helpers below all passed an undefined name to
    ``set_verbosity``; the INFO/WARNING/DEBUG/ERROR mapping follows the
    upstream `datasets` logging module — confirm the ordering.
    """
    return set_verbosity(INFO )


def set_verbosity_warning ( ):
    """Set the verbosity to WARNING."""
    return set_verbosity(WARNING )


def set_verbosity_debug ( ):
    """Set the verbosity to DEBUG."""
    return set_verbosity(DEBUG )


def set_verbosity_error ( ):
    """Set the verbosity to ERROR."""
    return set_verbosity(ERROR )
def disable_propagation ( ):
    """Disable propagation of the library's log output to ancestor loggers.

    BUG FIX: the scrambled body bound ``False`` to a throwaway local and
    had no effect; setting the root logger's ``propagate`` flag follows
    the upstream `datasets` implementation — confirm.
    """
    _get_library_root_logger().propagate = False


def enable_propagation ( ):
    """Enable propagation of the library's log output to ancestor loggers."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm :
    '''Drop-in stand-in for ``tqdm`` that does nothing.

    Iterates the wrapped iterable (if any) and silently swallows every
    tqdm method call, so callers can use the tqdm API unconditionally.

    BUG FIX: the class name is restored from the factory below which
    instantiates ``EmptyTqdm``; ``__init__``/``__exit__`` repeated a
    parameter name (a SyntaxError); and the iterable was bound to a
    throwaway local while ``__iter__`` reads ``self._iterator``.
    '''

    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__( self ):
        """Iterate over the wrapped iterable."""
        return iter(self._iterator )

    def __getattr__( self , name ):
        """Return a no-op callable for any tqdm method (e.g. ``update``)."""
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn

    def __enter__( self ):
        return self

    def __exit__( self , exc_type , exc_value , traceback ):
        return
_a = True
class _tqdm_cls :
    """Factory standing in for ``tqdm``: returns a real progress bar when
    bars are globally enabled, otherwise an inert ``EmptyTqdm``.

    BUG FIX: the class name is restored from the module-level call site
    (``_tqdm_cls()``); ``__call__`` repeated a parameter name (a
    SyntaxError); the lock methods shared one scrambled name (shadowing);
    and ``set_lock`` bound its sentinel to a throwaway local instead of
    ``self._lock``.
    """

    def __call__( self , *args , disable=False , **kwargs ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )

    def set_lock( self , *args , **kwargs ):
        """Forward ``set_lock`` to tqdm when progress bars are active."""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )

    def get_lock( self ):
        """Return tqdm's global lock when progress bars are active."""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
_a = _tqdm_cls()
def __a ( ):
global _tqdm_active
return bool(_tqdm_active )
def __a ( ):
global _tqdm_active
UpperCAmelCase_ : Tuple = True
def __a ( ):
global _tqdm_active
UpperCAmelCase_ : int = False
| 61 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
def make_batched ( _a):
    """Normalise video input into ``list[video]``, each video a list of frames.

    BUG FIX: the function was bound to a placeholder name while the
    processor's ``preprocess`` calls ``make_batched``, and the body read
    an undefined ``videos``; bind it from the parameter.
    """
    videos = _a
    if isinstance(videos , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos
    elif isinstance(videos , (list, tuple)) and is_valid_image(videos[0]):
        # A single video (list of frames) -> wrap into a one-video batch.
        return [videos]
    elif is_valid_image(videos):
        # A single frame -> one-frame video in a one-video batch.
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class _UpperCamelCase ( __A ):
    '''Image processor for video models: resize, center-crop, rescale
    (optionally offset into [-1, 1]) and normalize each frame of each video.

    BUG FIX: every method signature previously repeated the parameter name
    ``a`` (a SyntaxError), intermediate results were bound to a throwaway
    local while the bodies read the real names, and the method names were
    scrambled to one identifier; the names are restored from the bodies'
    own references (``self.resize``, ``self._preprocess_image``, ...).
    '''

    lowerCamelCase__ =['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame to ``size`` (either ``shortest_edge`` or height/width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop a frame to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by ``scale``; with ``offset`` the values are
        shifted first so the result is centred around zero.

        BUG FIX: ``np.floataa`` does not exist (scrambled ``float32``).
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize a frame with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one video or a batch of videos into pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(images)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase__ ( A_ ):
    """Tool that generates an English caption for an image with BLIP.

    BUG FIX: the class attributes all shared one scrambled name (later
    definitions shadowed earlier ones), ``__init__`` repeated a parameter
    name (a SyntaxError), the three pipeline hooks shared one name, and
    ``skip_special_tokens`` was passed an undefined value; the standard
    ``PipelineTool`` attribute/hook names are restored — confirm against
    the upstream tool definition.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__( self , *args , **kwargs ) -> str:
        # PIL is needed to handle the input image.
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )

    def encode( self , image ):
        """Turn the input image into model-ready pixel values."""
        return self.pre_processor(images=image , return_tensors='pt' )

    def forward( self , inputs ):
        """Run caption generation on the encoded inputs."""
        return self.model.generate(**inputs )

    def decode( self , outputs ):
        """Decode the generated ids into the caption string."""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
| 62 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

# BUG FIX: the four constants below were all bound to the same scrambled
# name ``a_`` (each shadowing the previous) while the tokenizer class in
# this file reads ``VOCAB_FILES_NAMES`` / ``PRETRAINED_VOCAB_FILES_MAP`` /
# ``PRETRAINED_INIT_CONFIGURATION`` /
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``; bind the referenced names.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _UpperCamelCase ( __A ):
    """Fast ConvBERT tokenizer backed by HuggingFace *tokenizers*.

    BUG FIX: the five class attributes shared one scrambled name (only the
    last survived), two method signatures repeated a parameter name (a
    SyntaxError), and the normalizer state was bound to a throwaway local;
    the standard fast BERT-style tokenizer structure and the
    ``PreTrainedTokenizerFast`` API names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # If the saved normalizer disagrees with the requested options,
        # rebuild it with the requested casing/accent/CJK settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` (plus ``B [SEP]`` when a pair is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """0s for the first sequence (and its specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''
def harmonic_series ( n_term: str ) -> list:
    """Return the harmonic series up to the ``n_term``-th term as strings.

    BUG FIX: the function was bound to a placeholder name while the
    ``__main__`` block calls ``harmonic_series``, and the body read the
    undefined names ``n_term``/``series`` while the parameter/local were
    bound elsewhere; the names the body actually uses are restored.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term ) ):
        series.append(F'1/{temp + 1}' if series else "1" )
    return series
if __name__ == "__main__":
    # BUG FIX: the prompt result was bound to a throwaway name while the
    # call below reads ``nth_term``; bind the name actually used.
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 63 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Make the checkout's `src/` importable ahead of any installed copy, so tests
# always exercise the current working tree.
a_ = abspath(join(dirname(dirname(__file__)), 'src'))
# Fix: the inserted path must be the variable bound above (`a_`);
# `git_repo_path` was never defined.
sys.path.insert(1, a_)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase__ ( _a):
    """pytest hook body: register the diffusers test-suite's shared CLI options
    on the parser *_a* (deferred import keeps conftest import cheap)."""
    from diffusers.utils.testing_utils import pytest_addoption_shared as _shared

    _shared(_a)
def lowerCamelCase__ ( _a):
    """pytest hook body: when ``--make-reports`` is given, emit detailed reports.

    *_a* is the pytest terminal reporter.
    NOTE(review): this rebinds the name defined just above (both were presumably
    `pytest_addoption` / `pytest_terminal_summary` originally) — confirm hook
    names against git history. Fixes: the option was read from an undefined
    `terminalreporter` and the report id was passed as the reporter object.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    SCREAMING_SNAKE_CASE : Union[str, Any] = _a.config.getoption("--make-reports")
    if SCREAMING_SNAKE_CASE:
        # `id` is the report-set name supplied on the command line.
        pytest_terminal_summary_main(_a , id=SCREAMING_SNAKE_CASE)
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def UpperCAmelCase__ (snake_case__ : str , repo_path : str="shi-labs/oneformer_demo" ):
    """Download the class-info JSON *snake_case__* from the dataset repo
    *repo_path* and build the metadata dict used by OneFormerImageProcessor:
    one ``id -> name`` entry per class plus ``"thing_ids"`` and
    ``"class_names"`` lists.

    NOTE(review): the original reused ``snake_case__`` for both parameters
    (a SyntaxError) and rebound every local to ``_snake_case``; names were
    reconstructed from the body's reads — confirm against upstream.
    """
    with open(hf_hub_download(repo_path , snake_case__ , repo_type="""dataset""" ) , """r""" ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["""name"""]
        class_names.append(info["""name"""] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["""thing_ids"""] = thing_ids
    metadata["""class_names"""] = class_names
    return metadata
class lowercase( unittest.TestCase ):
    """Fixture holder for the OneFormer image-processor tests below.

    NOTE(review): this dump was mechanically obfuscated — ``__init__`` reuses
    the parameter name ``a_`` for all 17 arguments (a SyntaxError) and every
    local is rebound to ``_snake_case``, so reads such as ``parent``,
    ``batch_size``, ``image_inputs``, ``image`` and ``expected_values`` below
    are unbound. Restore the original identifiers before running.
    """
    # NOTE(review): parameters were presumably (parent, batch_size=7,
    # num_channels=3, min_resolution=30, max_resolution=400, size=None,
    # do_resize=True, do_normalize=True, image_mean, image_std, num_labels=10,
    # do_reduce_labels=False, ignore_index=255, repo_path, class_info_file,
    # num_text=10) — inferred from the assignments; confirm upstream.
    def __init__( self: List[str], a_: Dict, a_: Any=7, a_: int=3, a_: List[Any]=30, a_: Tuple=400, a_: int=None, a_: List[Any]=True, a_: Dict=True, a_: Any=[0.5, 0.5, 0.5], a_: str=[0.5, 0.5, 0.5], a_: Dict=10, a_: List[str]=False, a_: Optional[Any]=255, a_: Optional[Any]="shi-labs/oneformer_demo", a_: Optional[Any]="ade20k_panoptic.json", a_: int=10, ):
        '''simple docstring'''
        _snake_case : List[Any] = parent
        _snake_case : Tuple = batch_size
        _snake_case : str = num_channels
        _snake_case : int = min_resolution
        _snake_case : Tuple = max_resolution
        _snake_case : int = do_resize
        _snake_case : Optional[Any] = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
        _snake_case : Union[str, Any] = do_normalize
        _snake_case : Union[str, Any] = image_mean
        _snake_case : Dict = image_std
        _snake_case : List[str] = class_info_file
        _snake_case : Dict = prepare_metadata(a_, a_ )
        _snake_case : List[Any] = num_text
        _snake_case : Tuple = repo_path
        # for the post_process_functions
        _snake_case : Tuple = 2
        _snake_case : Any = 10
        _snake_case : Optional[int] = 10
        _snake_case : Any = 3
        _snake_case : str = 4
        _snake_case : List[str] = num_labels
        _snake_case : Any = do_reduce_labels
        _snake_case : Union[str, Any] = ignore_index
    # Returns the kwargs dict used to construct the image processor under test.
    def UpperCamelCase_ ( self: Optional[Any] ):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    # Mirrors the resize rule: scale so the shortest edge equals
    # size["shortest_edge"]; batched mode takes the per-image maxima.
    def UpperCamelCase_ ( self: int, a_: str, a_: Optional[int]=False ):
        '''simple docstring'''
        if not batched:
            _snake_case : Tuple = image_inputs[0]
            if isinstance(a_, Image.Image ):
                _snake_case , _snake_case : List[str] = image.size
            else:
                _snake_case , _snake_case : Optional[int] = image.shape[1], image.shape[2]
            if w < h:
                _snake_case : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
                _snake_case : Optional[Any] = self.size["""shortest_edge"""]
            elif w > h:
                _snake_case : List[str] = self.size["""shortest_edge"""]
                _snake_case : List[Any] = int(self.size["""shortest_edge"""] * w / h )
            else:
                _snake_case : List[str] = self.size["""shortest_edge"""]
                _snake_case : List[Any] = self.size["""shortest_edge"""]
        else:
            _snake_case : Optional[int] = []
            for image in image_inputs:
                _snake_case , _snake_case : Any = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            _snake_case : Tuple = max(a_, key=lambda a_ : item[0] )[0]
            _snake_case : str = max(a_, key=lambda a_ : item[1] )[1]
        return expected_height, expected_width
    # Builds a fake model output with random class/mask logits for the
    # post-processing tests.
    def UpperCamelCase_ ( self: Tuple ):
        '''simple docstring'''
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ), )
@require_torch
@require_vision
class lowercase( __a , unittest.TestCase ):
    """Tests for OneFormerImageProcessor: construction, PIL/numpy/torch batching,
    segmentation-map handling, RLE encoding, and the three post-processing paths.

    NOTE(review): same mechanical corruption as the tester above — the class
    attribute ``lowercase__`` and method name ``UpperCamelCase_`` are each
    rebound many times (only the last binding survives), the base class ``__a``
    is unbound, and names such as ``image_processing_class``,
    ``OneFormerImageProcessorTester``, ``image_processor``, ``inputs`` and
    ``fature_extractor`` are read where only ``_snake_case`` was assigned.
    """
    lowercase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    lowercase__ = image_processing_class
    # setUp-equivalent: builds the fixture holder defined above.
    def UpperCamelCase_ ( self: List[str] ):
        '''simple docstring'''
        _snake_case : List[str] = OneFormerImageProcessorTester(self )
    @property
    def UpperCamelCase_ ( self: Optional[int] ):
        '''simple docstring'''
        return self.image_processing_tester.prepare_image_processor_dict()
    # Checks that all expected config attributes exist on the processor.
    def UpperCamelCase_ ( self: Dict ):
        '''simple docstring'''
        _snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a_, """image_mean""" ) )
        self.assertTrue(hasattr(a_, """image_std""" ) )
        self.assertTrue(hasattr(a_, """do_normalize""" ) )
        self.assertTrue(hasattr(a_, """do_resize""" ) )
        self.assertTrue(hasattr(a_, """size""" ) )
        self.assertTrue(hasattr(a_, """ignore_index""" ) )
        self.assertTrue(hasattr(a_, """class_info_file""" ) )
        self.assertTrue(hasattr(a_, """num_text""" ) )
        self.assertTrue(hasattr(a_, """repo_path""" ) )
        self.assertTrue(hasattr(a_, """metadata""" ) )
        self.assertTrue(hasattr(a_, """do_reduce_labels""" ) )
    def UpperCamelCase_ ( self: str ):
        '''simple docstring'''
        pass
    # Batching test with PIL images.
    def UpperCamelCase_ ( self: Optional[int] ):
        '''simple docstring'''
        _snake_case : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _snake_case : str = prepare_image_inputs(self.image_processing_tester, equal_resolution=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_, Image.Image )
        # Test not batched input
        _snake_case : Tuple = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
        _snake_case , _snake_case : Dict = self.image_processing_tester.get_expected_values(a_ )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        _snake_case , _snake_case : Any = self.image_processing_tester.get_expected_values(a_, batched=a_ )
        _snake_case : Tuple = image_processor(
            a_, ["""semantic"""] * len(a_ ), return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    # Batching test with numpy arrays.
    def UpperCamelCase_ ( self: Dict ):
        '''simple docstring'''
        _snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _snake_case : List[str] = prepare_image_inputs(self.image_processing_tester, equal_resolution=a_, numpify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_, np.ndarray )
        # Test not batched input
        _snake_case : Optional[int] = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
        _snake_case , _snake_case : Any = self.image_processing_tester.get_expected_values(a_ )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        _snake_case , _snake_case : Tuple = self.image_processing_tester.get_expected_values(a_, batched=a_ )
        _snake_case : Optional[Any] = image_processor(
            a_, ["""semantic"""] * len(a_ ), return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    # Batching test with torch tensors.
    def UpperCamelCase_ ( self: Any ):
        '''simple docstring'''
        _snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _snake_case : Any = prepare_image_inputs(self.image_processing_tester, equal_resolution=a_, torchify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_, torch.Tensor )
        # Test not batched input
        _snake_case : Dict = image_processor(image_inputs[0], ["""semantic"""], return_tensors="""pt""" ).pixel_values
        _snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(a_ )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        _snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(a_, batched=a_ )
        _snake_case : str = image_processor(
            a_, ["""semantic"""] * len(a_ ), return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    # Helper: build processor inputs with optional segmentation maps
    # (referenced below as `comm_get_image_processor_inputs`).
    def UpperCamelCase_ ( self: Dict, a_: List[Any]=False, a_: int=False, a_: Union[str, Any]="np" ):
        '''simple docstring'''
        _snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        _snake_case : Dict = self.image_processing_tester.num_labels
        _snake_case : int = None
        _snake_case : Union[str, Any] = None
        _snake_case : Any = prepare_image_inputs(self.image_processing_tester, equal_resolution=a_ )
        if with_segmentation_maps:
            _snake_case : str = num_labels
            if is_instance_map:
                _snake_case : Union[str, Any] = list(range(a_ ) ) * 2
                _snake_case : List[Any] = dict(enumerate(a_ ) )
            _snake_case : Any = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                _snake_case : List[str] = [Image.fromarray(a_ ) for annotation in annotations]
        _snake_case : Dict = image_processor(
            a_, ["""semantic"""] * len(a_ ), a_, return_tensors="""pt""", instance_id_to_semantic_id=a_, pad_and_return_pixel_mask=a_, )
        return inputs
    def UpperCamelCase_ ( self: List[Any] ):
        '''simple docstring'''
        pass
    # End-to-end check that mask/class labels are padded and batched together.
    def UpperCamelCase_ ( self: Any ):
        '''simple docstring'''
        def common(a_: List[Any]=False, a_: Any=None ):
            _snake_case : str = self.comm_get_image_processor_inputs(
                with_segmentation_maps=a_, is_instance_map=a_, segmentation_type=a_ )
            _snake_case : str = inputs["""mask_labels"""]
            _snake_case : str = inputs["""class_labels"""]
            _snake_case : Optional[int] = inputs["""pixel_values"""]
            _snake_case : int = inputs["""text_inputs"""]
            # check the batch_size
            for mask_label, class_label, text_input in zip(a_, a_, a_ ):
                self.assertEqual(mask_label.shape[0], class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:] )
                self.assertEqual(len(a_ ), self.image_processing_tester.num_text )
        common()
        common(is_instance_map=a_ )
        common(is_instance_map=a_, segmentation_type="""pil""" )
        common(is_instance_map=a_, segmentation_type="""pil""" )
    # Run-length encoding of a binary mask: pairs of (start, run length).
    def UpperCamelCase_ ( self: Dict ):
        '''simple docstring'''
        _snake_case : Optional[Any] = np.zeros((20, 50) )
        _snake_case : Any = 1
        _snake_case : Dict = 1
        _snake_case : Tuple = 1
        _snake_case : List[Any] = binary_mask_to_rle(a_ )
        self.assertEqual(len(a_ ), 4 )
        self.assertEqual(rle[0], 21 )
        self.assertEqual(rle[1], 45 )
    # Semantic-segmentation post-processing; NOTE(review): `fature_extractor`
    # below is unbound (the processor was assigned to `_snake_case`).
    def UpperCamelCase_ ( self: Union[str, Any] ):
        '''simple docstring'''
        _snake_case : Tuple = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
        _snake_case : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Union[str, Any] = fature_extractor.post_process_semantic_segmentation(a_ )
        self.assertEqual(len(a_ ), self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )
        _snake_case : List[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Dict = fature_extractor.post_process_semantic_segmentation(a_, target_sizes=a_ )
        self.assertEqual(segmentation[0].shape, target_sizes[0] )
    # Instance-segmentation post-processing.
    def UpperCamelCase_ ( self: List[Any] ):
        '''simple docstring'''
        _snake_case : Any = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
        _snake_case : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : str = image_processor.post_process_instance_segmentation(a_, threshold=0 )
        self.assertTrue(len(a_ ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("""segmentation""" in el )
            self.assertTrue("""segments_info""" in el )
            self.assertEqual(type(el["""segments_info"""] ), a_ )
            self.assertEqual(
                el["""segmentation"""].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
    # Panoptic-segmentation post-processing.
    def UpperCamelCase_ ( self: Any ):
        '''simple docstring'''
        _snake_case : str = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="""ade20k_panoptic.json""", num_text=self.image_processing_tester.num_text, repo_path="""shi-labs/oneformer_demo""", )
        _snake_case : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Optional[Any] = image_processor.post_process_panoptic_segmentation(a_, threshold=0 )
        self.assertTrue(len(a_ ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("""segmentation""" in el )
            self.assertTrue("""segments_info""" in el )
            self.assertEqual(type(el["""segments_info"""] ), a_ )
            self.assertEqual(
                el["""segmentation"""].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
    """Fixture holder for the ViT image-processor tests: stores the processor
    settings and exposes them as a kwargs dict.

    NOTE(review): the original ``__init__`` reused the parameter name ``a`` for
    every argument (a SyntaxError) and assigned locals instead of attributes,
    so the dict method's ``self.*`` reads could never succeed. Parameter names
    below were reconstructed from those attribute reads and the positional
    defaults (13, 3, 224, 30, 400, True, None, True, means, stds) — confirm
    against git history.
    """

    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=None , image_std=None , ):
        # Default target size is 18x18; None-guarded defaults avoid the
        # mutable-default-argument pitfall for the mean/std lists.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        self.image_std = [0.5, 0.5, 0.5] if image_std is None else image_std

    def __UpperCamelCase ( self ):
        """Return the kwargs dict used to construct the image processor.

        NOTE(review): the test class below calls this as
        ``prepare_image_processor_dict`` — the method name was presumably
        mangled by the same mechanical pass; kept as-is to avoid further
        interface drift.
        """
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
    """Image-processor tests (properties plus PIL/numpy/torch batching).

    NOTE(review): mechanically corrupted — the base class ``__A`` is unbound,
    ``EfficientFormerImageProcessorTester`` is undefined (the tester above is
    named ``_UpperCamelCase``, the same as this class), the tester instance is
    assigned to a local ``SCREAMING_SNAKE_CASE`` so ``self.image_proc_tester``
    is never set, and ``a`` is read where nothing binds it.
    """
    lowerCamelCase__ =ViTImageProcessor if is_vision_available() else None
    # setUp-equivalent: should bind the fixture holder to self.image_proc_tester.
    def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : List[Any] = EfficientFormerImageProcessorTester(self )
    @property
    def __UpperCamelCase ( self : Any ) -> List[str]:
        """simple docstring"""
        return self.image_proc_tester.prepare_image_processor_dict()
    # Checks that all expected config attributes exist on the processor.
    def __UpperCamelCase ( self : List[Any] ) -> Dict:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a , "image_mean" ) )
        self.assertTrue(hasattr(a , "image_std" ) )
        self.assertTrue(hasattr(a , "do_normalize" ) )
        self.assertTrue(hasattr(a , "do_resize" ) )
        self.assertTrue(hasattr(a , "size" ) )
    def __UpperCamelCase ( self : int ) -> str:
        """simple docstring"""
        pass
    # Batching test with PIL images.
    def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a )
        for image in image_inputs:
            self.assertIsInstance(a , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : str = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
    # Batching test with numpy arrays.
    def __UpperCamelCase ( self : List[str] ) -> Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , numpify=a )
        for image in image_inputs:
            self.assertIsInstance(a , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Any = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
    # Batching test with torch tensors.
    def __UpperCamelCase ( self : List[str] ) -> List[str]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , torchify=a )
        for image in image_inputs:
            self.assertIsInstance(a , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
            ) , )
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Pin all RNG / cuDNN behavior so the hard-coded audio slices below reproduce.
enable_full_determinism()
class A ( UpperCAmelCase_ , unittest.TestCase ):
    """Fast pipeline tests for DanceDiffusion.

    NOTE(review): mechanically corrupted — the mixin base ``UpperCAmelCase_``
    is unbound, every class attribute is named ``__UpperCAmelCase`` (each
    rebinds the previous; only the last survives after name mangling), every
    method is named ``lowercase_`` (same shadowing), and locals are all
    rebound to ``UpperCAmelCase__`` so reads like ``unet``, ``scheduler``,
    ``pipe``, ``output`` and ``components`` below are unbound.
    """
    __UpperCAmelCase : Optional[int] = DanceDiffusionPipeline
    __UpperCAmelCase : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    __UpperCAmelCase : List[Any] = PipelineTesterMixin.required_optional_params - {
        'callback',
        'latents',
        'callback_steps',
        'output_type',
        'num_images_per_prompt',
    }
    __UpperCAmelCase : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    __UpperCAmelCase : str = False
    __UpperCAmelCase : List[str] = False
    # Builds a tiny 1-D UNet + IPNDM scheduler for fast CPU testing.
    def lowercase_ (self : Optional[int] ) -> Any:
        """simple docstring"""
        torch.manual_seed(0 )
        UpperCAmelCase__ = UNetaDModel(
            block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__UpperCAmelCase , use_timestep_embedding=__UpperCAmelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
        UpperCAmelCase__ = IPNDMScheduler()
        UpperCAmelCase__ = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    # Deterministic generator + minimal call kwargs for the pipeline.
    def lowercase_ (self : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int]=0 ) -> Any:
        """simple docstring"""
        if str(__UpperCAmelCase ).startswith("mps" ):
            UpperCAmelCase__ = torch.manual_seed(__UpperCAmelCase )
        else:
            UpperCAmelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        UpperCAmelCase__ = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    # Smoke test: run the pipeline and compare an audio slice to a golden value.
    def lowercase_ (self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase__ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase__ = self.get_dummy_components()
        UpperCAmelCase__ = DanceDiffusionPipeline(**__UpperCAmelCase )
        UpperCAmelCase__ = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        UpperCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
        UpperCAmelCase__ = pipe(**__UpperCAmelCase )
        UpperCAmelCase__ = output.audios
        UpperCAmelCase__ = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        UpperCAmelCase__ = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def lowercase_ (self : Tuple ) -> Any:
        """simple docstring"""
        return super().test_save_load_local()
    @skip_mps
    def lowercase_ (self : Dict ) -> List[str]:
        """simple docstring"""
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    @skip_mps
    def lowercase_ (self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        return super().test_save_load_optional_components()
    @skip_mps
    def lowercase_ (self : Union[str, Any] ) -> str:
        """simple docstring"""
        return super().test_attention_slicing_forward_pass()
    def lowercase_ (self : List[str] ) -> Dict:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    """GPU integration tests against the pretrained harmonai/maestro-150k
    checkpoint, in fp32 and fp16.

    NOTE(review): same corruption as the fast-test class above — all methods
    are named ``lowercase_`` (only the last binding survives) and locals are
    rebound to ``UpperCAmelCase__``, leaving reads such as ``pipe``,
    ``output``, ``audio`` and ``audio_slice`` unbound.
    """
    # Free GPU memory between tests.
    def lowercase_ (self : str ) -> int:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # fp32 run: compare a trailing audio slice against golden values.
    def lowercase_ (self : Dict ) -> str:
        """simple docstring"""
        UpperCAmelCase__ = torch_device
        UpperCAmelCase__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
        UpperCAmelCase__ = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe(generator=__UpperCAmelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
        UpperCAmelCase__ = output.audios
        UpperCAmelCase__ = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        UpperCAmelCase__ = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    # fp16 run: same check with half-precision weights.
    def lowercase_ (self : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        UpperCAmelCase__ = torch_device
        UpperCAmelCase__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
        UpperCAmelCase__ = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        UpperCAmelCase__ = torch.manual_seed(0 )
        UpperCAmelCase__ = pipe(generator=__UpperCAmelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
        UpperCAmelCase__ = output.audios
        UpperCAmelCase__ = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        UpperCAmelCase__ = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase__ ( _a):
    """Tokenize one dataset example and record its chars-per-token ratio.

    NOTE(review): mechanical renaming broke this body — every assignment
    targets ``SCREAMING_SNAKE_CASE``, so ``example`` (presumably the ``_a``
    parameter), ``output``, and the module-level ``tokenizer`` are unbound
    here; ``truncation=_a`` was presumably ``truncation=False``. Restore the
    original names before running.
    """
    SCREAMING_SNAKE_CASE : int = {}
    SCREAMING_SNAKE_CASE : Any = tokenizer(example["content"] , truncation=_a)["input_ids"]
    SCREAMING_SNAKE_CASE : Dict = len(example["content"]) / len(output["input_ids"])
    return output
# Pre-tokenize a dataset in parallel and push the result to the Hub.
# NOTE(review): every module-level binding below was renamed to `a_`, so each
# line clobbers the previous one and the reads (`parser`, `args`, `t_start`,
# `ds`, `tokenize`) are unbound — restore distinct names before running.
a_ = HfArgumentParser(PretokenizationArguments)
a_ = parser.parse_args()
if args.num_workers is None:
    a_ = multiprocessing.cpu_count()
a_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a_ = time.time()
a_ = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a_ = time.time()
# Tokenize with num_workers processes; drop all raw columns afterwards.
a_ = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
# NOTE(review): every constant below was renamed to `__a`, so each assignment
# clobbers the previous one; the class at the bottom of the file reads the
# original names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_INIT_CONFIGURATION),
# which are unbound — restore those names before running.
__a = logging.get_logger(__name__)
# File names the tokenizer saves/loads.
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Hub URLs for the vocab and pre-built tokenizer files of each checkpoint.
__a = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum sequence length (positional-embedding size) per checkpoint.
__a = {
    "distilbert-base-uncased": 5_12,
    "distilbert-base-uncased-distilled-squad": 5_12,
    "distilbert-base-cased": 5_12,
    "distilbert-base-cased-distilled-squad": 5_12,
    "distilbert-base-german-cased": 5_12,
    "distilbert-base-multilingual-cased": 5_12,
}
# Default constructor kwargs per checkpoint (casing behavior).
__a = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : int = VOCAB_FILES_NAMES
_A : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : List[Any] = PRETRAINED_INIT_CONFIGURATION
_A : int = ["""input_ids""", """attention_mask"""]
_A : str = DistilBertTokenizer
def __init__( self: List[Any] , snake_case: Dict=None , snake_case: Tuple=None , snake_case: List[str]=True , snake_case: int="[UNK]" , snake_case: List[str]="[SEP]" , snake_case: Any="[PAD]" , snake_case: List[Any]="[CLS]" , snake_case: int="[MASK]" , snake_case: Optional[Any]=True , snake_case: Dict=None , **snake_case: List[str] , ) -> Optional[int]:
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
snake_case_ :str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case ) != tokenize_chinese_chars
):
snake_case_ :Optional[Any] = getattr(snake_case , normalizer_state.pop("""type""" ) )
snake_case_ :Any = do_lower_case
snake_case_ :Optional[int] = strip_accents
snake_case_ :Optional[Any] = tokenize_chinese_chars
snake_case_ :Dict = normalizer_class(**snake_case )
snake_case_ :Optional[Any] = do_lower_case
def lowerCAmelCase_ ( self: Dict , snake_case: Union[str, Any] , snake_case: Union[str, Any]=None ) -> int:
snake_case_ :Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase_ ( self: Any , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :List[Any] = [self.sep_token_id]
snake_case_ :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self: str , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]:
snake_case_ :Any = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def get_detr_config(model_name):
    """Build a DetrConfig for the given checkpoint name.

    Restores the function/variable names that the call sites below
    (``get_detr_config``) expect; the mangled version discarded every
    assignment and called ``int()`` on the wrong variable.

    Args:
        model_name: Name of the checkpoint (must contain "resnet-50" or
            "resnet-101"; "panoptic" selects segmentation labels).

    Returns:
        Tuple of (config, is_panoptic).

    Raises:
        ValueError: If the model name names no supported backbone.
    """
    # initialize config with a non-timm ResNet backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # JSON keys arrive as strings; the config expects integer class IDs.
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    """List all (original_name, our_name) state-dict key renames for DETR.

    Covers the ResNet stem/stages, the transformer encoder/decoder layers,
    and the projection/head parameters. The mangled version assigned the
    accumulator list to a throwaway name; it is restored as ``rename_keys``,
    and the function is named as its call site expects.

    Args:
        config: A DetrConfig whose ``backbone_config.depths`` and
            ``encoder_layers`` describe the architecture.

    Returns:
        List of (src_key, dest_key) tuples.
    """
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut (only the first layer of a stage downsamples)
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    ))
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
                    ))
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
                    ))
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
                    ))
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
                    ))
            # 3 convs per bottleneck layer
            for i in range(3):
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    ))
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
                    ))
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
                    ))
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
                    ))
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
                    ))
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
                f"encoder.layers.{i}.self_attn.out_proj.weight",
            ))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
                f"decoder.layers.{i}.self_attn.out_proj.weight",
            ))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
                f"decoder.layers.{i}.encoder_attn.out_proj.weight",
            ))
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
                f"decoder.layers.{i}.encoder_attn.out_proj.bias",
            ))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ])

    return rename_keys
def rename_key(state_dict, old, new):
    """Move *old*'s tensor to key *new* in *state_dict*, in place.

    The mangled version popped the value and then discarded it into a
    local, losing the rename entirely; the write-back is restored.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused attention projections into separate q/k/v entries.

    PyTorch's MultiHeadAttention stores query/key/value as one
    ``in_proj_weight``/``in_proj_bias``; the HF model expects them as
    separate ``q_proj``/``k_proj``/``v_proj`` parameters. The mangled
    version popped the fused tensors but discarded every slice into a
    local; the state-dict writes are restored. Modifies *state_dict*
    in place.

    Args:
        state_dict: The original DETR checkpoint's state dict.
        is_panoptic: Whether keys carry the "detr." panoptic prefix.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download and return the standard COCO cats test image.

    The mangled version passed an undefined name for ``stream``; the
    original ``stream=True`` (required to expose ``.raw``) is restored,
    and the function is named as its call site expects.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original torch-hub DETR checkpoint to the HF format.

    Loads the facebookresearch/detr model, renames/splits its state-dict
    keys, loads them into the HF model, verifies outputs against the
    original on a test image, and optionally saves / pushes the result.
    The mangled version discarded every state-dict write into a local;
    the in-place renames are restored following the visible control flow.

    Args:
        model_name: One of "detr-resnet-50" / "detr-resnet-101".
        pytorch_dump_folder_path: Optional output directory.
        push_to_hub: Whether to upload model and processor to the hub.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                # segmentation-head params keep their original names
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    image_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=image_format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # CLI entry point: the mangled version bound the parser and the parsed
    # args to `a_` while later lines referenced `parser` / `args`;
    # consistent names are restored.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--model_name',
        default='detr-resnet-50',
        type=str,
        choices=['detr-resnet-50', 'detr-resnet-101'],
        help='Name of the DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''


class a__:
    """Disjoint-set (union-find) over weighted sets.

    Tracks per-set element counts and the size of the largest set
    (``max_set``). The mangled version discarded every attribute write
    into a local and gave both methods the same name while calling
    ``self.get_parent``; the attribute assignments and the
    ``get_parent`` name are restored.
    """

    def __init__(self, a: list):
        """Initialize one singleton set per entry of *a* (its counts)."""
        self.set_counts = a
        self.max_set = max(a)
        num_sets = len(a)
        self.ranks = [1] * num_sets
        # each element starts as its own root
        self.parents = list(range(num_sets))

    def SCREAMING_SNAKE_CASE__(self, a: int, b: int) -> bool:
        """Union the sets containing *a* and *b*; return False if already joined.

        Union-by-rank: the lower-rank root is attached to the higher-rank
        one, and the joined set's count is accumulated on the surviving
        root. ``max_set`` is updated with the merged size.
        """
        src_parent = self.get_parent(a)
        dst_parent = self.get_parent(b)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Return the root of *disj_set*, compressing the path as we go."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        # path compression: point directly at the root
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 67 |
import os


def solution():
    """Project Euler 22: sum of alphabetical name scores times position.

    Reads p022_names.txt next to this file, sorts the names, and scores
    each as (sum of letter values, A=1) * (1-based position). The mangled
    version passed an undefined name to ``os.path.dirname`` (restored to
    ``__file__``) and discarded the local assignments.

    Returns:
        The total of all name scores.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace("\"", "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # 'A' -> 1

        total_score += (i + 1) * name_score
        name_score = 0  # reset for the next name
    return total_score


if __name__ == "__main__":
    print(solution())
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Lazy-import table: the mangled version bound it to a throwaway name
# while _LazyModule below references `_import_structure`; and the
# _LazyModule instance must be installed into sys.modules to take effect.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
from collections.abc import Callable
import numpy as np
def lowerCamelCase__(ode_func, ya, xa, step_size, x_end):
    """Solve y' = ode_func(x, y) with the explicit (forward) Euler method.

    The mangled signature declared `_a` five times (a SyntaxError) and
    discarded the update into a local; parameter names matching the body
    and the ``y[k + 1]`` write are restored.

    Args:
        ode_func: Right-hand side f(x, y) of the ODE.
        ya: Initial value y(xa).
        xa: Initial x.
        step_size: Step length h.
        x_end: Final x; the number of steps is ceil((x_end - xa) / h).

    Returns:
        numpy array of the approximated y values, length n + 1.
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        # forward Euler update: y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
__UpperCamelCase = range(2, 20 + 1)
__UpperCamelCase = [10**k for k in range(ks[-1] + 1)]
__UpperCamelCase = {}
def next_term(a_i, k, i, n):
    """Advance the digit-sum sequence using cached jumps of size base[k].

    ``a_i`` holds the current term's digits little-endian, split as
    b * 10**k + c. Cached (diff, dn, k) jumps in ``memo`` let us skip
    many terms at once. The mangled version collapsed the four helper
    names into one and discarded unpacks/writes; names and in-place
    digit updates are restored.

    Args:
        a_i: Digit list of the current term (mutated in place).
        k: Current jump level (power of ten).
        i: Index of the current term.
        n: Target number of terms.

    Returns:
        Tuple (diff, dn): total value added and number of terms advanced.
    """
    # ds_b: digitsum of the high part b; c: value of the low k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    """Advance the sequence term by term until a carry passes index k.

    Repeatedly adds the current digit sum into the low k digits of
    ``a_i`` (mutated in place), stopping when the target index n is
    reached or a carry overflows into the high part. The mangled version
    discarded the unpacks and digit writes; they are restored.

    Args:
        a_i: Digit list of the current term (mutated in place).
        k: Number of low digits updated directly.
        i: Index of the current term.
        n: Target number of terms.

    Returns:
        Tuple (diff, terms advanced).
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        # carry escaped the low k digits: fold it into the high part
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Add *addend* into the little-endian digit list starting at index k.

    Mutates *digits* in place, propagating carries and appending new
    digits if the number grows. The mangled version discarded the
    divmod unpacks and digit writes; they are restored.

    Args:
        digits: Little-endian digit list (mutated in place).
        k: Index at which to start adding (i.e. add addend * 10**k).
        addend: Non-negative value to add.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    # number grew: append the remaining carry digits
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n=10**15):
    """Return the n-th term of a(k+1) = a(k) + digitsum(a(k)), a(1) = 1.

    Uses ``next_term`` with memoized jumps to skip ahead in large steps.
    The mangled version shared its name with the three helpers above;
    the name the ``__main__`` guard calls (``solution``) is restored.

    Args:
        n: 1-based index of the requested term (default Project Euler size).

    Returns:
        The n-th sequence term as an int.
    """
    digits = [1]  # a(1) = 1, digits stored little-endian
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # reassemble the little-endian digit list into an integer
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 69 |
def or_gate(input_a: int, input_b: int) -> int:
    """Return 1 if either input is 1, else 0 (logical OR of two bits).

    The mangled signature declared `_a` twice (a SyntaxError), and the
    call sites below use ``or_gate``; both are restored.
    """
    return int((input_a, input_b).count(1) != 0)
def test_or_gate() -> None:
    """Check the full OR truth table; the name no longer shadows or_gate."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401


# Emits a deprecation warning at import time, steering users to the new
# top-level `from diffusers import FlaxStableDiffusionControlNetPipeline` path.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 70 |
# Universal gas constant R in J/(mol*K); the function below references it
# by this name, so the mangled `a_` binding is restored.
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed sqrt(3RT/M) of a gas molecule.

    Renamed from the mangled def to the name the __main__ demo calls.

    Args:
        temperature: Absolute temperature in kelvin (must be >= 0).
        molar_mass: Molar mass in kg/mol (must be > 0).

    Returns:
        RMS speed in m/s.

    Raises:
        ValueError: If temperature is negative or molar_mass is not positive.
            (ValueError subclasses Exception, so existing broad handlers
            still catch it.)
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
a_ = 300
a_ = 28
a_ = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''') | 76 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class __A(SchedulerCommonTest):
    """Tests for DPMSolverSDEScheduler.

    The mangled version left the base class and class attributes unbound
    and gave every method the same name while the bodies reference
    ``self.scheduler_classes``, ``self.num_inference_steps`` and
    ``self.get_scheduler_config``; those names (and standard ``test_*``
    method names, required for unittest discovery) are restored.
    All numeric expectations are copied verbatim from the original.
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001], [0.0_002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run a full denoising loop and check sum/mean of the result."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # device-specific reference values
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_821_044_921_875) < 1e-2
            assert abs(result_mean.item() - 0.2_178_705_964_565_277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_352_111_816_406) < 1e-2
            assert abs(result_mean.item() - 0.22_342_906_892_299_652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_383_422_851_562) < 1e-2
            assert abs(result_mean.item() - 0.211_619_570_851_326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same full loop but with v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_149_200_439_453) < 1e-2
            assert abs(result_mean.item() - 0.16_226_289_014_816_284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_663_360_595_703) < 1e-2
            assert abs(result_mean.item() - 0.16_688_326_001_167_297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8_487_548_828_125) < 1e-2
            assert abs(result_mean.item() - 0.1_560_530_662_536_621) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the target device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_957_397_460_938) < 1e-2
            assert abs(result_mean.item() - 0.21_805_934_607_982_635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_353_637_695_312) < 1e-2
            assert abs(result_mean.item() - 0.22_342_908_382_415_771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_383_422_851_562) < 1e-2
            assert abs(result_mean.item() - 0.211_619_570_851_326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_974_135_742_188) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_653_564_453_125) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3_135_223_388_672) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1e-2
| 71 |
# Sample undirected graph as adjacency lists; the BFS demo under
# __main__ references it as `demo_graph`, so the mangled `a_` binding
# is restored.
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path(graph, start, goal):
    """Return one shortest path from *start* to *goal* as a list of nodes.

    Breadth-first search over an adjacency-list graph; returns [] when
    no path exists. The mangled version discarded the queue/explored
    bindings and appended the wrong objects; the real names and writes
    are restored (the function name matches the __main__ call site).

    Args:
        graph: Mapping node -> list of neighbour nodes.
        start: Start node.
        goal: Target node.

    Returns:
        List of nodes from start to goal inclusive, or [] if unreachable.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph, start, target):
    """Return the number of edges on a shortest start->target path, or -1.

    BFS hop counting; -1 when the graph is empty or either endpoint is
    missing. The mangled version discarded the queue/visited/dist
    bindings; the real names and writes are restored.

    Args:
        graph: Mapping node -> list of neighbour nodes.
        start: Start node.
        target: Target node.

    Returns:
        Edge count of a shortest path, 0 if start == target, else -1
        when unreachable or invalid.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    # NOTE(review): set(start) iterates the node itself — correct for the
    # single-character node labels used here; for multi-character labels
    # {start} would be required. Behavior of the original is preserved.
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4 | 76 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase(unittest.TestCase):
    """Slow integration test for the TF mT5 model."""

    @slow
    def test_small_integration_test(self):
        """Run google/mt5-small on a fixed input/label pair and compare the
        negated mean per-token loss against a recorded reference score.

        NOTE(review): local names and the method name restored — the
        obfuscated source referenced undefined `a` and collapsed all locals.
        """
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.22_8168
        # Small tolerance for numerical drift across TF versions / hardware.
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE__(vl, wt, w, n):
    """Solve the fractional (continuous) knapsack problem greedily.

    Args:
        vl: values of the items.
        wt: weights of the items.
        w: total weight capacity of the knapsack.
        n: number of items.

    Returns:
        The maximum total value that fits in capacity ``w`` when the last
        item may be taken fractionally.

    >>> SCREAMING_SNAKE_CASE__([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort (value, weight) pairs by value density, densest first.
    # NOTE(review): the original passed a list to `reverse=` and used a
    # mismatched lambda parameter — restored to `reverse=True` / `x`.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # acc[i] = total weight of the (i + 1) densest items.
    acc = list(accumulate(wt))
    # k = number of whole items that fit within capacity w.
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from math import factorial
def binomial_distribution(successes, trials, prob):
    """Return the probability of exactly ``successes`` successes in ``trials``
    independent Bernoulli trials with per-trial success probability ``prob``.

    Raises:
        ValueError: if counts are negative or non-integers, if
            ``successes > trials``, or if ``prob`` is outside (0, 1).
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_lowercase = '''examples/'''
_lowercase = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_lowercase = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
_lowercase = '''README.md'''
def _snake_case ( snake_case__ : str , snake_case__ : int , snake_case__ : Dict ):
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.read()
A , A = REPLACE_PATTERNS[pattern]
A = replace.replace('VERSION' , snake_case__ )
A = re_pattern.sub(snake_case__ , snake_case__ )
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(snake_case__ )
def _snake_case ( snake_case__ : Tuple ):
for folder, directories, fnames in os.walk(snake_case__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='examples' )
def _snake_case ( snake_case__ : List[str] , snake_case__ : List[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case__ , snake_case__ , snake_case__ )
if not patch:
update_version_in_examples(snake_case__ )
def _snake_case ( ):
A = '🤗 Transformers currently provides the following architectures'
A = '1. Want to contribute a new model?'
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
# Find the start of the list.
A = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
A = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
A = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(snake_case__ )
def _snake_case ( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
A = f.read()
A = REPLACE_PATTERNS['init'][0].search(snake_case__ ).groups()[0]
return packaging.version.parse(snake_case__ )
def _snake_case ( snake_case__ : Dict=False ):
A = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
A = default_version.base_version
elif patch:
A = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
A = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
A = input(F'Which version are you releasing? [{default_version}]' )
if len(snake_case__ ) == 0:
A = default_version
print(F'Updating version to {version}.' )
global_version_update(snake_case__ , patch=snake_case__ )
def _snake_case ( ):
A = get_version()
A = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
A = current_version.base_version
# Check with the user we got that right.
A = input(F'Which version are we developing now? [{dev_version}]' )
if len(snake_case__ ) == 0:
A = dev_version
print(F'Updating version to {version}.' )
global_version_update(snake_case__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
_lowercase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work() | 74 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCamelCase(BertTokenizerFast):
    """Fast tokenizer paired with the project's slow ``CustomTokenizer``.

    NOTE(review): the base class was an undefined placeholder (`__A`);
    ``BertTokenizerFast`` is the only tokenizer base imported by this module.
    The attribute below presumably maps to ``slow_tokenizer_class`` in the
    un-obfuscated original — confirm before relying on conversion behaviour.
    """

    lowerCamelCase__ = CustomTokenizer
    pass
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class __UpperCamelCase(DiffusionPipeline):
    """Minimal custom pipeline: runs one denoising step and returns an
    all-ones tensor shaped like the scheduler output (used to exercise the
    pipeline machinery deterministically).

    NOTE(review): the base class was an undefined placeholder and ``__call__``
    referenced ``__init__``'s parameters — locals restored.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # Register both modules so save/load and device placement see them.
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Random starting sample with the UNet's expected shape.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # The first two terms cancel, so the result is deterministically ones.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
# Module-level logger — the retriever classes below reference it as `logger`.
logger = logging.getLogger(__name__)
class _UpperCamelCase:
    """Ray actor wrapper that lazily builds a ``RagRetriever`` in a worker
    process and exposes retrieval via remote calls.

    NOTE(review): method names restored from their ``.remote`` call sites in
    the distributed retriever below; the obfuscated source gave every method
    the same name.
    """

    def __init__(self):
        # Retriever construction is deferred to create_rag_retriever().
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the worker-local RagRetriever once (idempotent)."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index into memory inside this worker."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run dense retrieval; returns (doc_ids, retrieved_doc_embeds)."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class _UpperCamelCase(RagRetriever):
    """RAG retriever that fans retrieval out to a pool of Ray actor workers;
    with an empty pool it falls back to local, in-process retrieval.

    NOTE(review): base class, method names and locals restored from call
    sites (``create_rag_retriever.remote`` / ``init_retrieval`` /
    ``retrieve`` / ``get_tokenizers``); the classmethod constructor name
    (``from_pretrained``) is inferred from its body — confirm against the
    un-obfuscated original.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py " )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Build a retriever inside every Ray worker up front.
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self):
        """Load the index, either inside every Ray worker or locally."""
        logger.info("initializing retrieval" )
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve docs, load-balancing across workers at random."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(_UpperCamelCase, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build the retriever (and its Ray worker pool) from a pretrained config."""
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
"""simple docstring"""
def a_ ( _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : set ):
'''simple docstring'''
lowercase__ , lowercase__ : Tuple = len(_lowerCAmelCase ), len(grid[0] )
if (
min(_lowerCAmelCase , _lowerCAmelCase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowercase__ : Optional[int] = 0
count += depth_first_search(_lowerCAmelCase , row + 1 , _lowerCAmelCase , _lowerCAmelCase )
count += depth_first_search(_lowerCAmelCase , row - 1 , _lowerCAmelCase , _lowerCAmelCase )
count += depth_first_search(_lowerCAmelCase , _lowerCAmelCase , col + 1 , _lowerCAmelCase )
count += depth_first_search(_lowerCAmelCase , _lowerCAmelCase , col - 1 , _lowerCAmelCase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
from typing import Any
class Node:
    """A single element of a singly linked list, holding arbitrary data."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload
        self.next = None  # link to the next node (None = end of list)

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion and
    in-place reversal.

    NOTE(review): class and method names restored from their call sites in
    this module's tests (``insert_tail``, ``delete_nth``, ...); the
    obfuscated source gave every method the same mangled name and used
    duplicate parameter names.
    """

    def __init__(self) -> None:
        self.head = None  # first node, or None for an empty list

    def __iter__(self) -> Any:
        """Yield the data of every node, head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes (walks the list, O(n))."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """E.g. ``"1->2->3"``; an empty list renders as ``""``."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at position ``index``."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range." )
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at position ``index``."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range." )
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so that it ends up at position ``index``."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range" )
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data at position ``index``."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range." )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing every ``next`` link."""
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Smoke test of LinkedList: empty-list errors, positional insert/delete,
    indexing and reversal.

    NOTE(review): the obfuscated body referenced an undefined ``_a`` —
    restored to the local ``linked_list``.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Regression test mixing many data types (ints, floats, strings, Node
    instances and None) in one list."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.5_5555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo of LinkedList driven from stdin (name restored from
    the ``main()`` call in the __main__ guard below)."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
    # Run the interactive demo when executed as a script.
    # NOTE(review): corrupted trailing tokens ("| 76 | 0 |") removed — they
    # made this line a syntax error.
    main()
"""simple docstring"""
def _lowerCAmelCase ( ):
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(lowercase_ , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 78 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
# Module logger — referenced as `logger` by the verification helpers below.
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """How much verification to run on downloaded data and splits.

    NOTE(review): member names restored — the obfuscated source bound all
    three values to a single placeholder attribute.
    """

    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'
class ChecksumVerificationException(Exception):
    """Base class for checksum verification failures."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not listed in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files listed in the expected checksums were not downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum differs from the expected one."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    Args:
        expected_checksums: mapping url -> expected checksum info, or None to skip.
        recorded_checksums: mapping url -> recorded checksum info.
        verification_name: optional label used in log / error messages.

    Raises:
        ExpectedMoreDownloadedFiles: some expected URLs were not downloaded.
        UnexpectedDownloadedFile: extra URLs were downloaded.
        NonMatchingChecksumError: at least one checksum differs.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base class for dataset split verification failures."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not listed in the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A split's recorded size differs from the expected size."""
def verify_splits(expected_splits, recorded_splits):
    """Compare recorded dataset splits (names and example counts) against
    the expected ones.

    Raises:
        ExpectedMoreSplits: some expected split names were not recorded.
        UnexpectedSplits: extra split names were recorded.
        NonMatchingSplitsSizesError: a split's ``num_examples`` differs.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for a local file.

    The checksum is a SHA-256 hex digest computed in 1 MiB chunks, or None
    when ``record_checksum`` is False.
    """
    if record_checksum:
        # NOTE(review): original top-of-file import was garbled ("shaaaa");
        # imported locally here so this fix is self-contained.
        from hashlib import sha256

        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """Return True iff ``dataset_size`` is truthy and below the configured
    in-memory threshold (``config.IN_MEMORY_MAX_SIZE``); False otherwise,
    including when the threshold is disabled (0/None)."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch ``function`` for (distributed) training from a notebook.

    Handles TPU launches on Colab/Kaggle, multi-GPU via forked subprocesses,
    and plain single-device (CPU/GPU/MPS) execution.

    NOTE(review): signature and local names restored from later uses and
    error messages (`in_colab` / `in_kaggle` / `notebook_launcher` all appear
    verbatim in the original body); the obfuscated source collapsed every
    local onto one name.
    """
    # Are we running inside a Kaggle kernel or Google Colab?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`." )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(F'''Launching a training on {num_processes} TPU cores.''')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`." )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function." )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(F'''Launching training on {num_processes} GPUs.''')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic." ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # presumably PYTORCH_ENABLE_MPS_FALLBACK — TODO confirm which
                # env var the original set to "1" here.
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch ``function`` on CPU with ``num_processes`` forked processes —
    useful for debugging distributed code without GPUs."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.01", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            # presumably debug=True in the original — the placeholder arg was
            # unresolvable; confirm against upstream.
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into transformers format, writing
    the weights, config and vocab files into `pytorch_dump_folder_path`.

    NOTE(review): the obfuscated source dropped the dict-key assignments in
    the state-dict loop and printed the config path for the vocab file —
    both restored; the "transformer." prefix follows the comment below
    (confirm against the upstream conversion script).
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued params: only JSON-serializable config entries are kept.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE convention: append </w> to word-final tokens, strip "@@" from the rest.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
from math import factorial, pi
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__A ) )
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__A ) )
if __name__ == "__main__":
    # NOTE(review): `maclaurin_sin` / `maclaurin_cos` are not defined under
    # these names in this chunk (both functions above are named
    # `_UpperCamelCase`, the second shadowing the first), so running this
    # module as a script would raise NameError — confirm the intended naming.
    import doctest
    doctest.testmod()
    print(maclaurin_sin(1_0))
    print(maclaurin_sin(-1_0))
    print(maclaurin_sin(1_0, 1_5))
    print(maclaurin_sin(-1_0, 1_5))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(1_0, 1_5))
    print(maclaurin_cos(-1_0, 1_5))
| 80 |
def lowerCamelCase__(_a, _b):
    """Return the Manhattan (L1) distance between two n-dimensional points.

    Raises ValueError when the points differ in dimensionality and TypeError
    (via ``_validate_point``) when an argument is not a list of numbers.
    Fixes vs. the previous revision: both parameters were named ``_a`` (a
    SyntaxError) and the dimensionality check compared a length to itself.
    """
    _validate_point(_a)
    _validate_point(_b)
    if len(_a) != len(_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(_a, _b)))


def _validate_point(point):
    """Validate that *point* is a non-empty list of ints/floats; raise otherwise.

    Previously this helper was mangled to ``lowerCamelCase__`` (leaving the
    ``_validate_point`` calls above unresolved) and the error-message targets
    were lost.
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def lowerCamelCase__(_a, _b):  # noqa: F811 - intentionally shadows the loop version, as in the original layout
    """One-expression variant of the Manhattan distance (shadows the version above)."""
    _validate_point(_a)
    _validate_point(_b)
    if len(_a) != len(_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(_a, _b)))
if __name__ == "__main__":
    import doctest

    # Run the module doctests when executed as a script. (Removed the stray
    # "| 76 | 0 |" table residue that made the last line a SyntaxError.)
    doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: submodule name -> public names re-exported from it.
# Fixes vs. the previous revision: the dict was bound to a throwaway name while
# the _LazyModule call below reads `_import_structure`, the torch-only model
# list was discarded, the lazy module was never installed in sys.modules, and
# the last line carried "| 81 |" table residue (a SyntaxError).
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)  # module logger
# Checkpoint id -> config URL for pretrained ViT-MSN models.
# NOTE(review): this rebinds `a_`, clobbering the logger bound just above —
# confirm the intended module-level names.
a_ = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCamelCase(PretrainedConfig):
    """Configuration class for ViT-MSN models (model type ``vit_msn``).

    Fixes vs. the previous revision: the class subclassed the undefined name
    ``__A`` (PretrainedConfig is imported at the top of this chunk), every
    ``__init__`` parameter was named ``a`` (a SyntaxError), all arguments were
    discarded into annotated locals instead of being stored on ``self``, and
    the last line carried "| 76 | 0 |" table residue.
    """

    model_type = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
from __future__ import annotations
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = str(snake_case )
return n == n[::-1]
def _UpperCAmelCase ( snake_case = 1_00_00_00 ):
"""simple docstring"""
_lowerCAmelCase = 0
for i in range(1 , snake_case ):
if is_palindrome(snake_case ) and is_palindrome(bin(snake_case ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 82 |
import baseaa
def lowerCamelCase__(_a):
    """Encode the string *_a* with ``baseaa.aaaencode`` after UTF-8 encoding it.

    Bug fix: the body previously read the undefined name ``string`` instead of
    the parameter, raising NameError on every call.
    """
    return baseaa.aaaencode(_a.encode("utf-8"))
def lowerCamelCase__(_a):
    """Decode *_a* with ``baseaa.aaadecode`` and return it as a UTF-8 string."""
    decoded_bytes = baseaa.aaadecode(_a)
    return decoded_bytes.decode("utf-8")
if __name__ == "__main__":
    import doctest

    # Run the module doctests when executed as a script. (Removed the stray
    # "| 76 | 0 |" table residue that made the last line a SyntaxError.)
    doctest.testmod()
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : Optional[int]=0 ):
'''simple docstring'''
_UpperCamelCase : List[Any] = np.random.RandomState(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Tuple = self.get_dummy_inputs()
_UpperCamelCase : Optional[Any] = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : str = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : Dict = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Dict = self.get_dummy_inputs()
_UpperCamelCase : str = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : Dict = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Tuple = self.get_dummy_inputs()
_UpperCamelCase : str = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : List[str] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : str = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = self.get_dummy_inputs()
_UpperCamelCase : Union[str, Any] = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : Dict = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Any = self.get_dummy_inputs()
_UpperCamelCase : List[Any] = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : List[Any] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
_UpperCamelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : int = self.get_dummy_inputs()
_UpperCamelCase : Optional[int] = pipe(**lowerCamelCase__ ).images
_UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_UpperCamelCase : Dict = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = self.get_dummy_inputs()
_UpperCamelCase : int = 3 * [inputs['prompt']]
# forward
_UpperCamelCase : Union[str, Any] = pipe(**lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = output.images[0, -3:, -3:, -1]
_UpperCamelCase : Dict = self.get_dummy_inputs()
_UpperCamelCase : Optional[int] = 3 * [inputs.pop('prompt' )]
_UpperCamelCase : Tuple = pipe.tokenizer(
lowerCamelCase__ ,padding='max_length' ,max_length=pipe.tokenizer.model_max_length ,truncation=lowerCamelCase__ ,return_tensors='np' ,)
_UpperCamelCase : Dict = text_inputs['input_ids']
_UpperCamelCase : Optional[Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_UpperCamelCase : Tuple = prompt_embeds
# forward
_UpperCamelCase : Dict = pipe(**lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : int = self.get_dummy_inputs()
_UpperCamelCase : Dict = 3 * ['this is a negative prompt']
_UpperCamelCase : Optional[int] = negative_prompt
_UpperCamelCase : Tuple = 3 * [inputs['prompt']]
# forward
_UpperCamelCase : Tuple = pipe(**lowerCamelCase__ )
_UpperCamelCase : int = output.images[0, -3:, -3:, -1]
_UpperCamelCase : List[Any] = self.get_dummy_inputs()
_UpperCamelCase : Optional[int] = 3 * [inputs.pop('prompt' )]
_UpperCamelCase : List[str] = []
for p in [prompt, negative_prompt]:
_UpperCamelCase : int = pipe.tokenizer(
lowerCamelCase__ ,padding='max_length' ,max_length=pipe.tokenizer.model_max_length ,truncation=lowerCamelCase__ ,return_tensors='np' ,)
_UpperCamelCase : Optional[Any] = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = embeds
# forward
_UpperCamelCase : Dict = pipe(**lowerCamelCase__ )
_UpperCamelCase : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
# NOTE(review): nightly GPU integration tests. As in the class above, the
# `_UpperCamelCase` locals are assigned and never used while later lines read
# undefined names (`sd_pipe`, `options`, `output`, `image`, ...), and the
# nested callback at the start of the callback test declares three parameters
# all named `lowerCamelCase__` (a SyntaxError) — the original names appear to
# have been lost and still need to be restored. This class also shadows the
# fast-test class above, which is likewise named `lowercase__`.
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
    @property
    def UpperCamelCase_ ( self : List[Any] ):
        '''ONNX Runtime CUDA provider spec with a capped GPU memory arena.'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def UpperCamelCase_ ( self : Dict ):
        '''ONNX Runtime session options for the GPU runs.'''
        _UpperCamelCase : Dict = ort.SessionOptions()
        _UpperCamelCase : str = False
        return options
    def UpperCamelCase_ ( self : List[Any] ):
        '''SD v1.4 with the default scheduler: check a 512x512 output slice.'''
        # using the PNDM scheduler by default
        _UpperCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        _UpperCamelCase : List[Any] = 'A painting of a squirrel eating a burger'
        np.random.seed(0 )
        _UpperCamelCase : int = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type='np' )
        _UpperCamelCase : List[Any] = output.images
        _UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _UpperCamelCase : Tuple = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def UpperCamelCase_ ( self : Optional[int] ):
        '''SD v1.5 with a DDIM scheduler: check a 512x512 output slice.'''
        _UpperCamelCase : List[Any] = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
        _UpperCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        _UpperCamelCase : Any = 'open neural network exchange'
        _UpperCamelCase : List[Any] = np.random.RandomState(0 )
        _UpperCamelCase : List[Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase__ ,output_type='np' )
        _UpperCamelCase : Optional[Any] = output.images
        _UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _UpperCamelCase : Optional[int] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def UpperCamelCase_ ( self : List[Any] ):
        '''SD v1.5 with an LMSDiscrete scheduler: check a 512x512 output slice.'''
        _UpperCamelCase : Any = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
        _UpperCamelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        _UpperCamelCase : List[Any] = 'open neural network exchange'
        _UpperCamelCase : Dict = np.random.RandomState(0 )
        _UpperCamelCase : Union[str, Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase__ ,output_type='np' )
        _UpperCamelCase : Union[str, Any] = output.images
        _UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _UpperCamelCase : Optional[Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def UpperCamelCase_ ( self : List[Any] ):
        '''The per-step callback must fire once per step and see the expected latents.'''
        _UpperCamelCase : Dict = 0
        def test_callback_fn(lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : np.ndarray ) -> None:
            _UpperCamelCase : Union[str, Any] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                _UpperCamelCase : Any = latents[0, -3:, -3:, -1]
                _UpperCamelCase : Dict = np.array(
                    [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                _UpperCamelCase : Union[str, Any] = latents[0, -3:, -3:, -1]
                _UpperCamelCase : int = np.array(
                    [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
        _UpperCamelCase : Dict = False
        _UpperCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )
        _UpperCamelCase : Tuple = 'Andromeda galaxy in a bottle'
        _UpperCamelCase : Optional[Any] = np.random.RandomState(0 )
        pipe(
            prompt=lowerCamelCase__ ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=lowerCamelCase__ ,callback=lowerCamelCase__ ,callback_steps=1 ,)
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def UpperCamelCase_ ( self : Tuple ):
        '''A pipeline with safety_checker=None must still run, save and reload.'''
        _UpperCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
        assert isinstance(lowerCamelCase__ ,lowerCamelCase__ )
        assert pipe.safety_checker is None
        _UpperCamelCase : List[Any] = pipe('example prompt' ,num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase__ )
            _UpperCamelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
            # sanity check that the pipeline still works
            assert pipe.safety_checker is None
            _UpperCamelCase : Union[str, Any] = pipe('example prompt' ,num_inference_steps=2 ).images[0]
            assert image is not None
| 83 |
from datetime import datetime as dt
import os
from github import Github
# Issue labels that exempt an issue from the stale-bot close/comment pass below.
# NOTE(review): the function below iterates `LABELS_TO_EXEMPT`, not `a_` —
# confirm which module-level name this list is meant to be bound to.
a_ = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]
def lowerCamelCase__():
    """Stale-bot pass over open huggingface/transformers issues.

    Closes issues whose latest comment is the bot's own stale notice after 7+
    days of inactivity, and posts a stale warning on issues inactive for 23+
    days; issues younger than 30 days or carrying an exempt label are skipped.

    Fixes vs. the previous revision: the sort key read the undefined name ``i``
    through a lambda whose parameter was ``_a``, ``reverse``/``len`` referenced
    a non-existent parameter, and the g/repo/open_issues/comments/last_comment
    assignment targets were lost.

    NOTE(review): ``LABELS_TO_EXEMPT`` is not defined under that name in this
    chunk (the label list above is bound to ``a_``) — confirm the naming.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        # Newest comment first, so comments[0] is the most recent one.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
if __name__ == "__main__":
    # The stale-issue pass above is defined as `lowerCamelCase__`; the previous
    # revision called the undefined name `main` and carried table residue.
    lowerCamelCase__()
"""simple docstring"""
def _snake_case ( lowercase__ : int ) -> Dict:
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
lowerCAmelCase_ :List[Any] = len(lowercase__ )
lowerCAmelCase_ :Optional[Any] = max(lowercase__ )
lowerCAmelCase_ :Tuple = min(lowercase__ )
# create the counting array
lowerCAmelCase_ :Optional[Any] = coll_max + 1 - coll_min
lowerCAmelCase_ :int = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , lowercase__ ):
lowerCAmelCase_ :Optional[Any] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
lowerCAmelCase_ :Union[str, Any] = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
lowerCAmelCase_ :int = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _snake_case ( lowercase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 84 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)  # module-level logger for this image processor
def lowerCamelCase__(_a):
    """Normalize *_a* into the batched video layout: a list of videos, each a list of frames.

    Accepts a single image, a list of frames, or a list of videos; raises
    ValueError otherwise. Bug fix: the body previously mixed the parameter
    ``_a`` with the undefined name ``videos``.
    """
    if isinstance(_a, (list, tuple)) and isinstance(_a[0], (list, tuple)) and is_valid_image(_a[0][0]):
        # Already batched: list of videos (each a list/tuple of frames).
        return _a
    elif isinstance(_a, (list, tuple)) and is_valid_image(_a[0]):
        # A single video (list of frames) -> wrap into a batch of one.
        return [_a]
    elif is_valid_image(_a):
        # A single frame -> batch of one single-frame video.
        return [[_a]]
    raise ValueError(f"Could not make batched video from {_a}")
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =['pixel_values']
def __init__( self : Optional[Any] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {"shortest_edge": 256}
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE : str = get_size_dict(a , param_name="crop_size" )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop
SCREAMING_SNAKE_CASE : int = crop_size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : Tuple = offset
SCREAMING_SNAKE_CASE : str = do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self : Optional[Any] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(a , size["shortest_edge"] , default_to_square=a )
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE : Dict = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a , size=a , resample=a , data_format=a , **a )
def __UpperCamelCase ( self : List[str] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def __UpperCamelCase ( self : List[Any] , a : np.ndarray , a : Union[int, float] , a : bool = True , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = image.astype(np.floataa )
if offset:
SCREAMING_SNAKE_CASE : Union[str, Any] = image - (scale / 2)
return rescale(a , scale=a , data_format=a , **a )
def __UpperCamelCase ( self : int , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def __UpperCamelCase ( self : Tuple , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : List[str] = to_numpy_array(a )
if do_resize:
SCREAMING_SNAKE_CASE : Optional[Any] = self.resize(image=a , size=a , resample=a )
if do_center_crop:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.center_crop(a , size=a )
if do_rescale:
SCREAMING_SNAKE_CASE : Any = self.rescale(image=a , scale=a , offset=a )
if do_normalize:
SCREAMING_SNAKE_CASE : Tuple = self.normalize(image=a , mean=a , std=a )
SCREAMING_SNAKE_CASE : Optional[int] = to_channel_dimension_format(a , a )
return image
def __UpperCamelCase ( self : Dict , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Tuple , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Union[str, Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : int = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = offset if offset is not None else self.offset
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : int = size if size is not None else self.size
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE : Tuple = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(a , param_name="crop_size" )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
SCREAMING_SNAKE_CASE : Optional[int] = make_batched(a )
SCREAMING_SNAKE_CASE : List[Any] = [
[
self._preprocess_image(
image=a , do_resize=a , size=a , resample=a , do_center_crop=a , crop_size=a , do_rescale=a , rescale_factor=a , offset=a , do_normalize=a , image_mean=a , image_std=a , data_format=a , )
for img in video
]
for video in videos
]
SCREAMING_SNAKE_CASE : Optional[int] = {"pixel_values": videos}
return BatchFeature(data=a , tensor_type=a ) | 76 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _snake_case(SchedulerCommonTest):
    """Test suite for `DPMSolverSinglestepScheduler`.

    NOTE(review): the previous revision collapsed attribute, method and local
    names into placeholders (`lowerCAmelCase_`, `lowerCAmelCase__`,
    `snake_case_`), so every internal reference (`self.scheduler_classes`,
    `self.get_scheduler_config`, `config`, `kwargs`, `sample`, ...) raised
    NameError and the undefined base class `lowercase_` broke class creation.
    Names are restored here to match their call sites; the base class is the
    imported `SchedulerCommonTest`.
    """

    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config with `kwargs` overrides applied."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and verify both produce identical steps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/load behaviour is exercised by check_over_configs / check_over_forward
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Verify a single `step` call is identical after a save/load round trip."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a full 10-step denoising loop and return the final sample."""
        if scheduler is None:
            # only build a default scheduler when the caller did not supply one;
            # the previous revision rebuilt it unconditionally, silently
            # discarding the `scheduler` argument
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # `torch.floataa` does not exist; half-precision tensors are `torch.float16`
        assert sample.dtype == torch.float16
| 85 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): all five module constants were previously assigned to the same
# name `a_`, so each assignment clobbered the previous one and the class below
# referenced undefined names (`VOCAB_FILES_NAMES`, ...). Restored distinct names
# matching those references.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

# Map of pretrained checkpoint name -> vocabulary file URL.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

# Maximum input sequence length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

# Tokenizer-construction overrides per pretrained checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _UpperCamelCase(PreTrainedTokenizerFast):
    """Fast ConvBERT tokenizer backed by HuggingFace *tokenizers*.

    NOTE(review): the previous revision named every parameter `a` (a
    SyntaxError: duplicate argument names), named all three methods
    `__UpperCamelCase`, and inherited from the undefined `__A`. Parameter and
    method names are restored to the ones the fast-tokenizer base class calls.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer if the serialized state disagrees with
        # the constructor arguments (e.g. a checkpoint saved with different casing).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from id lists."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (+ specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        # Second segment gets type id 1; the previous revision measured the
        # first sequence twice, producing a wrong-length segment mask.
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# NOTE(review): every constant below was previously assigned to the single name
# `lowerCamelCase__`, leaving the names actually referenced by the docstring
# decorators (`_CONFIG_FOR_DOC`, `_CHECKPOINT_FOR_DOC`, ...) undefined.
# General docstring
_CONFIG_FOR_DOC = """ResNetConfig"""

# Base docstring
_CHECKPOINT_FOR_DOC = """microsoft/resnet-50"""
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """microsoft/resnet-50"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tiger cat"""

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """microsoft/resnet-50""",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation building block.

    NOTE(review): restored from a revision whose parameters were all named
    `_SCREAMING_SNAKE_CASE` (SyntaxError) and which called the nonexistent
    `nn.Convad`/`nn.BatchNormad`; the class name matches the references made
    by the sibling embedding/layer classes, and `forward` is the name
    `nn.Module` dispatches to.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        # `padding=kernel_size // 2` keeps the spatial size for stride 1 ("same" padding)
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max-pooling.

    NOTE(review): parameter/class/method names restored (previous revision had
    duplicate `_SCREAMING_SNAKE_CASE` parameters and the nonexistent
    `nn.MaxPoolad`).
    """

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        # fail fast on a channel mismatch instead of a cryptic conv error
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """1x1 projection shortcut: matches channels/stride of a residual branch.

    NOTE(review): parameter names restored (previous revision had duplicate
    `_SCREAMING_SNAKE_CASE` parameters) and the nonexistent
    `nn.Convad`/`nn.BatchNormad` replaced with `nn.Conv2d`/`nn.BatchNorm2d`.
    """

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """Classic ResNet residual block: two 3x3 convolutions plus a shortcut.

    NOTE(review): names restored (duplicate `_SCREAMING_SNAKE_CASE` parameters
    were a SyntaxError); the second conv layer runs without activation so the
    nonlinearity is applied after the residual addition.
    """

    def __init__(self, in_channels, out_channels, stride=1, activation="relu"):
        super().__init__()
        # a projection shortcut is needed whenever the residual shape changes
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand, plus shortcut.

    NOTE(review): names restored (duplicate `_SCREAMING_SNAKE_CASE` parameters
    were a SyntaxError). The first 1x1 conv reduces channels by `reduction`
    and the last 1x1 conv expands back; the final activation runs after the
    residual addition.
    """

    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage: `depth` stacked layers, downsampling in the first one.

    NOTE(review): names restored (duplicate `_SCREAMING_SNAKE_CASE` parameters
    were a SyntaxError); the layer classes referenced here match the restored
    `ResNetBottleNeckLayer`/`ResNetBasicLayer` names.
    """

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """Stack of `ResNetStage`s; optionally collects per-stage hidden states.

    NOTE(review): names restored (duplicate `_SCREAMING_SNAKE_CASE` parameters
    were a SyntaxError; locals like `hidden_states` were referenced but never
    bound under their real names).
    """

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and pretrained-model plumbing for ResNet.

    NOTE(review): the base class `_lowerCamelCase` was undefined; restored to
    the imported `PreTrainedModel`, attribute names to the ones
    `PreTrainedModel` reads (`config_class`, `base_model_prefix`, ...), and
    `nn.Convad`/`nn.BatchNormad` to the real `nn.Conv2d`/`nn.BatchNorm2d`.
    """

    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convolutions, unit-scale for normalizations
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            self.gradient_checkpointing = value
# Docstring templates injected into the model classes via `add_start_docstrings`.
# NOTE(review): both were previously assigned to the same name
# `lowerCamelCase__`, so the second assignment clobbered the first and the
# decorators referenced an undefined `_lowerCamelCase`.
RESNET_START_DOCSTRING = R"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = R"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.',
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    """Bare ResNet backbone returning the last feature map and a pooled vector.

    NOTE(review): class/base/constant names restored; the previous revision's
    decorator referenced the undefined `_lowerCamelCase` and every parameter
    was named `_SCREAMING_SNAKE_CASE` (SyntaxError). `nn.AdaptiveAvgPoolad`
    replaced with the real `nn.AdaptiveAvgPool2d`.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # global average pooling down to a (1, 1) spatial map
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    """ResNet with a linear classification head on the pooled features.

    NOTE(review): names restored; in particular the `problem_type` auto-detect
    previously assigned to throwaway locals instead of
    `self.config.problem_type`, so the loss branch below it never matched.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # infer the problem type once from label dtype / label count
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ',
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """ResNet as a multi-scale feature backbone for detection/segmentation heads.

    NOTE(review): class/base names restored (previous revision inherited from
    the undefined `_lowerCamelCase` twice and had duplicate
    `_SCREAMING_SNAKE_CASE` parameters).
    """

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        # always request hidden states so every stage's map is available below
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): this was previously assigned to `a_`, making the
# `git_repo_path` reference on the next line a NameError.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    """Register the shared diffusers test-suite CLI options with pytest.

    NOTE(review): renamed from `lowerCamelCase__` — pytest only discovers this
    hook under the exact name `pytest_addoption`.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Emit the extended test reports when `--make-reports` is given.

    NOTE(review): renamed from `lowerCamelCase__` (pytest requires the exact
    hook name) and the parameter restored — the body referenced
    `terminalreporter`, which was previously undefined.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class snake_case_:
    """Model tester building configs/inputs for TF BlenderbotSmall tests.

    NOTE(review): the previous revision assigned constructor arguments to a
    throwaway local (`lowercase__ = parent`) instead of instance attributes,
    so every later `self.batch_size`/`self.parent` access raised
    AttributeError; class attributes and method names are restored to match
    their call sites, and the nonexistent `tf.inta` to `tf.int8`.
    """

    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Build a config plus matching encoder/decoder input tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # force every sequence to end with EOS
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check cached-decoding output matches recomputation from scratch."""
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def lowercase_(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard encoder/decoder input dict, deriving any mask not given.

    Fixes: the original declared all eight parameters with the same name
    (`_lowerCamelCase`, a SyntaxError) — names restored from the body's reads —
    and used the non-existent dtype `tf.inta` (restored to `tf.int8`).
    """
    if attention_mask is None:
        # 1 for every real token, 0 on padding positions.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token; mask padding elsewhere.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class snake_case_ ( __A ,__A ,unittest.TestCase ):
    # Test-suite configuration for the TF BlenderbotSmall models.
    # NOTE(review): the base list repeats `__A` twice — Python rejects duplicate
    # base classes with a TypeError at class-creation time; the two bases were
    # presumably distinct mixins (model-tester and pipeline-tester) originally.
    # NOTE(review): the class attributes below all share the name `__A`, so only
    # the last assignment survives; upstream these are all_model_classes /
    # all_generative_model_classes / pipeline_model_mapping / flags.
    __A : Optional[int] = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    __A : List[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    __A : str = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __A : Any = True
    __A : Tuple = False
    __A : Union[str, Any] = False

    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        # Build the shared model tester plus a config tester for the common checks.
        # NOTE(review): `config_class=lowercase_` passes the module-level input-dict
        # helper, not a config class — looks mangled; confirm against upstream.
        lowercase__ : List[str] = TFBlenderbotSmallModelTester(self )
        lowercase__ : Optional[int] = ConfigTester(self , config_class=lowercase_ )

    def __UpperCamelCase ( self : str ) -> Tuple:
        # Run the shared PretrainedConfig sanity checks.
        self.config_tester.run_common_tests()

    def __UpperCamelCase ( self : Tuple ) -> Any:
        # Exercise the cached-decoding equivalence check defined on the tester.
        lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
@require_tokenizers
@require_tf
class snake_case_ ( unittest.TestCase ):
    # Slow integration test: generate a reply with facebook/blenderbot_small-90M and
    # compare the decoded text against a small set of known-good outputs.
    # NOTE(review): `require_tokenizers`, `cached_property`, `BlenderbotSmallTokenizer`
    # and `TFAutoModelForSeqaSeqLM` are not imported in the visible part of this file.
    __A : Union[str, Any] = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    __A : str = "facebook/blenderbot_small-90M"

    @cached_property
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )

    @cached_property
    def __UpperCamelCase ( self : str ) -> Optional[int]:
        # Lazily load (and cache) the TF seq2seq model for the checkpoint above.
        lowercase__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def __UpperCamelCase ( self : Any ) -> int:
        # Tokenize the prompt, beam-search a reply, and check the decoded text.
        # NOTE(review): locals are assigned to `lowercase__` but read back as
        # `model_inputs` / `generated_ids` / `generated_words`, and `use_cache` /
        # `skip_special_tokens` receive `lowercase_` — names look mangled.
        lowercase__ : Tuple = self.tokenizer(self.src_text , return_tensors="tf" )
        lowercase__ : List[str] = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase_ , )
        lowercase__ : str = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase_ )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 87 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
    """Configuration holder for image-processor tests: stores the sizes/flags the
    suite needs and produces the processor constructor kwargs.

    Fixes: the original `__init__` declared every parameter with the same name
    (`a`, a SyntaxError) — names restored from the attribute assignments — and
    used mutable list defaults, replaced by the None-sentinel idiom.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Default target size mirrors the original hard-coded value.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        # Avoid mutable default arguments; originals defaulted to [0.5, 0.5, 0.5].
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def __UpperCamelCase(self):
        """Return the kwargs dict used to build the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
    '''Test-suite for the ViT image processor: verifies the advertised properties
    and that PIL / numpy / torch inputs are all resized to the configured size.

    NOTE(review): local names in this class look mangled — assignments target the
    single name SCREAMING_SNAKE_CASE while later lines read `image_processor`,
    `image_inputs`, `encoded_images`, `a`, and `self.image_proc_tester`, none of
    which are ever bound here, and `EfficientFormerImageProcessorTester` is not
    defined in the visible file. Restore the originals before running this suite.
    '''

    lowerCamelCase__ =ViTImageProcessor if is_vision_available() else None

    def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
        '''Build the shared tester that supplies sizes and processor kwargs.'''
        SCREAMING_SNAKE_CASE : List[Any] = EfficientFormerImageProcessorTester(self )

    @property
    def __UpperCamelCase ( self : Any ) -> List[str]:
        '''Processor constructor kwargs, as produced by the tester.'''
        return self.image_proc_tester.prepare_image_processor_dict()

    def __UpperCamelCase ( self : List[Any] ) -> Dict:
        '''The processor must expose all of the configuration attributes.'''
        SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a , "image_mean" ) )
        self.assertTrue(hasattr(a , "image_std" ) )
        self.assertTrue(hasattr(a , "do_normalize" ) )
        self.assertTrue(hasattr(a , "do_resize" ) )
        self.assertTrue(hasattr(a , "size" ) )

    def __UpperCamelCase ( self : int ) -> str:
        '''Intentionally empty placeholder (batch-feature test skipped here).'''
        pass

    def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
        '''PIL inputs: single image and batch are resized to the configured size.'''
        # Initialize image_processor
        SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a )
        for image in image_inputs:
            self.assertIsInstance(a , Image.Image )

        # Test not batched input
        SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

        # Test batched
        SCREAMING_SNAKE_CASE : str = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def __UpperCamelCase ( self : List[str] ) -> Any:
        '''numpy inputs: single array and batch are resized to the configured size.'''
        # Initialize image_processor
        SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , numpify=a )
        for image in image_inputs:
            self.assertIsInstance(a , np.ndarray )

        # Test not batched input
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

        # Test batched
        SCREAMING_SNAKE_CASE : Any = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def __UpperCamelCase ( self : List[str] ) -> List[str]:
        '''torch inputs: single tensor and batch are resized to the configured size.'''
        # Initialize image_processor
        SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , torchify=a )
        for image in image_inputs:
            self.assertIsInstance(a , torch.Tensor )

        # Test not batched input
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

        # Test batched
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( _A ):
    """Descriptor that computes `fget` once per instance and caches the result
    under `__cached_<name>` on the instance (a `property`-style cached_property).

    Fix: the original `__get__` declared both of its parameters with the same
    name (`UpperCamelCase__`), which is a SyntaxError.
    """

    def __get__( self , obj , objtype=None ):
        if obj is None:
            # Accessed on the class, not an instance: return the descriptor itself.
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute" )
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            # First access: compute and memoize on the instance.
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def a__ ( A_ ):
    """Map a truthy/falsy string to 1/0 (mirrors distutils.util.strtobool).

    Raises ValueError for anything outside the two recognised vocabularies.
    """
    normalized = A_.lower()
    truthy = {"y", "yes", "t", "true", "on", "1"}
    falsy = {"n", "no", "f", "false", "off", "0"}
    if normalized in truthy:
        return 1
    if normalized in falsy:
        return 0
    raise ValueError(f"invalid truth value {normalized!r}" )
def a__ ( A_ ):
    '''Return True when `A_` is a tensor-like object: a torch.fx proxy, a torch /
    TensorFlow / JAX tensor (each checked only if the framework is installed,
    with lazy imports), or a numpy ndarray.'''
    if is_torch_fx_proxy(A_ ):
        return True
    if is_torch_available():
        import torch

        if isinstance(A_, torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(A_, tf.Tensor ):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        # JAX tracers appear during jit/grad tracing and count as tensors too.
        if isinstance(A_, (jnp.ndarray, Tracer) ):
            return True

    # Fallback: plain numpy arrays.
    return isinstance(A_, np.ndarray )
def a__ ( A_ ):
    """Return True when `A_` is a numpy ndarray (subclasses included)."""
    return isinstance(A_ , np.ndarray )
def a__ ( A_ ):
    """Public wrapper: tests whether `A_` is a numpy array."""
    return _is_numpy(A_ )
def a__ ( A_ ):
    """Return True when `A_` is a torch.Tensor (torch imported lazily)."""
    import torch

    return isinstance(A_ , torch.Tensor )
def a__ ( A_ ):
    """Guarded check: False when torch is unavailable, else defer to _is_torch."""
    if not is_torch_available():
        return False
    return _is_torch(A_ )
def a__ ( A_ ):
    """Return True when `A_` is a torch.device instance (lazy torch import)."""
    import torch

    return isinstance(A_ , torch.device )
def a__ ( A_ ):
    """Guarded check: False when torch is unavailable, else defer to _is_torch_device."""
    if not is_torch_available():
        return False
    return _is_torch_device(A_ )
def a__ ( A_ ):
    """Return True when `A_` is a torch.dtype, or a string naming one (e.g. "float32").

    Fix: the original tested `isinstance(A_, A_)` / `hasattr(A_, A_)`, which can
    never resolve a dtype name; a string argument is now looked up on the torch
    module before the final dtype check.
    """
    import torch

    dtype = A_
    if isinstance(dtype , str ):
        if hasattr(torch , dtype ):
            # Resolve e.g. "float32" -> torch.float32 before the type check.
            dtype = getattr(torch , dtype )
        else:
            return False
    return isinstance(dtype , torch.dtype )
def a__ ( A_ ):
    """Guarded check: False when torch is unavailable, else defer to _is_torch_dtype."""
    if not is_torch_available():
        return False
    return _is_torch_dtype(A_ )
def a__ ( A_ ):
    """Return True when `A_` is a TensorFlow tensor (lazy tensorflow import)."""
    import tensorflow as tf

    return isinstance(A_ , tf.Tensor )
def a__ ( A_ ):
    """Guarded check: False when TensorFlow is unavailable, else defer to _is_tensorflow."""
    if not is_tf_available():
        return False
    return _is_tensorflow(A_ )
def a__ ( A_ ):
    """Return True when `A_` is a symbolic (graph-mode) TensorFlow tensor.

    Fix: the original called `hasattr(A_, "is_symbolic_tensor")`, probing the
    argument instead of the tensorflow module, so the TF>=2.14 fast path could
    never trigger.
    """
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , "is_symbolic_tensor" ):
        return tf.is_symbolic_tensor(A_ )
    # Older TF: an exact tf.Tensor (not Eager subclass) is symbolic.
    return type(A_ ) == tf.Tensor
def a__ ( A_ ):
    """Guarded check: False when TensorFlow is unavailable, else defer to _is_tf_symbolic_tensor."""
    if not is_tf_available():
        return False
    return _is_tf_symbolic_tensor(A_ )
def a__ ( A_ ):
    """Return True when `A_` is a JAX ndarray (lazy jax import)."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(A_ , jnp.ndarray )
def a__ ( A_ ):
    """Guarded check: False when flax/jax is unavailable, else defer to _is_jax."""
    if not is_flax_available():
        return False
    return _is_jax(A_ )
def a__ ( A_ ):
    """Recursively convert tensors / arrays / containers to plain Python objects
    (dicts, lists, and scalars).

    Fix: the original body read an undefined name `obj` and recursed through an
    undefined name `to_py_obj`; recursion now goes through a local helper so the
    function is self-contained.
    """

    def _convert(obj ):
        if isinstance(obj , (dict, UserDict) ):
            return {k: _convert(v ) for k, v in obj.items()}
        elif isinstance(obj , (list, tuple) ):
            return [_convert(o ) for o in obj]
        elif is_tf_tensor(obj ):
            return obj.numpy().tolist()
        elif is_torch_tensor(obj ):
            return obj.detach().cpu().tolist()
        elif is_jax_tensor(obj ):
            return np.asarray(obj ).tolist()
        elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
            return obj.tolist()
        else:
            return obj

    return _convert(A_ )
def a__ ( A_ ):
    """Recursively convert tensors / lists / dicts of tensors to numpy arrays.

    Fix: the original body read an undefined name `obj` and recursed through an
    undefined name `to_numpy`; recursion now goes through a local helper.
    """

    def _convert(obj ):
        if isinstance(obj , (dict, UserDict) ):
            return {k: _convert(v ) for k, v in obj.items()}
        elif isinstance(obj , (list, tuple) ):
            return np.array(obj )
        elif is_tf_tensor(obj ):
            return obj.numpy()
        elif is_torch_tensor(obj ):
            return obj.detach().cpu().numpy()
        elif is_jax_tensor(obj ):
            return np.asarray(obj )
        else:
            return obj

    return _convert(A_ )
class UpperCAmelCase_ ( _A ):
    """Ordered-dict/tuple hybrid base for model outputs: fields are accessible by
    attribute, string key, or integer index, and mutation of single keys is
    disallowed.

    Fixes applied: four methods declared `*UpperCamelCase__, **UpperCamelCase__`
    with the *same* name (a SyntaxError), and two `isinstance(x, x)` calls that
    could never express a type check are restored to their evident intent
    (`dict` / `str` membership tests).

    NOTE(review): several method names here look mangled — the post-init hook is
    upstream `__post_init__`, and `setdefault`/`pop`/`update`/`to_tuple` all share
    the name `_lowercase` (so only the last one survives in the class dict, and
    `__getitem__`'s call to `self.to_tuple()` cannot resolve). Names are kept to
    preserve the visible interface; confirm against upstream before relying on them.
    """

    def _lowercase ( self : List[Any] ) -> str:
        """Validate the dataclass fields and populate the mapping from them."""
        class_fields = fields(self )

        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"{self.__class__.__name__} has no fields." )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field." )

        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )

        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v

    def __delitem__( self , *args , **kwargs ):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )

    def _lowercase ( self , *args , **kwargs ):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )

    def _lowercase ( self , *args , **kwargs ):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )

    def _lowercase ( self , *args , **kwargs ):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )

    def __getitem__( self , k ):
        """String keys index the mapping; integer/slice keys index the tuple view."""
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self , name , value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )

    def __setitem__( self , key , value ):
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )

    def _lowercase ( self ) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not None."""
        return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( _A ):
    """Enum base that raises a friendlier error for unknown values.

    Fixes: the original listed the base `_A` twice, which Python rejects with
    "duplicate base class" at class-creation time (upstream this is
    `class ExplicitEnum(str, Enum)`), and referenced the non-existent enum
    attribute `_valueamember_map_` — the real private map is `_value2member_map_`.

    NOTE(review): upstream names this hook `_missing_` so Enum calls it for
    unknown values; the mangled name `_lowercase` is kept to preserve the
    visible interface.
    """

    @classmethod
    def _lowercase ( cls , value ):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}" )
class UpperCAmelCase_ ( _A ):
    '''Padding strategies accepted by tokenizer APIs.

    NOTE(review): the three members all share the name `a__`, so only the last
    assignment survives; upstream these are LONGEST / MAX_LENGTH / DO_NOT_PAD.
    '''
    a__ = """longest"""
    a__ = """max_length"""
    a__ = """do_not_pad"""
class UpperCAmelCase_ ( _A ):
    '''Tensor-framework identifiers used by `return_tensors=` arguments.

    NOTE(review): the four members all share the name `a__`, so only the last
    assignment survives; upstream these are PYTORCH / TENSORFLOW / NUMPY / JAX.
    '''
    a__ = """pt"""
    a__ = """tf"""
    a__ = """np"""
    a__ = """jax"""
class UpperCAmelCase_ :
    """Enter a list of context managers as a single `with` target.

    Delegates to `contextlib.ExitStack`, so every manager entered in
    `__enter__` is unwound in reverse order on exit.

    Fix: the original `__exit__` declared `*UpperCamelCase__` and
    `**UpperCamelCase__` with the same name — a SyntaxError.
    """

    def __init__( self , UpperCamelCase__ : List[ContextManager] ):
        # Managers are only recorded here; nothing is entered yet.
        self.context_managers = UpperCamelCase__
        self.stack = ExitStack()

    def __enter__( self ):
        # Enter in order; ExitStack guarantees reverse-order teardown.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )

    def __exit__( self , *args , **kwargs ):
        # Forward the exception triple (or nothing) straight to the stack.
        self.stack.__exit__(*args , **kwargs )
def a__ ( A_ ):
    """Return True when the model class's forward signature defaults
    `return_loss` to True (framework-appropriate entry point is inspected)."""
    framework = infer_framework(A_ )
    if framework == "tf":
        signature = inspect.signature(A_.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(A_.forward )  # PyTorch models
    else:
        signature = inspect.signature(A_.__call__ )  # Flax models

    return any(
        p == "return_loss" and signature.parameters[p].default is True for p in signature.parameters
    )
def a__ ( A_ ):
    """Return the label-argument names found in the model class's forward/call
    signature ("label" substrings, plus QA span positions)."""
    model_name = A_.__name__
    framework = infer_framework(A_ )
    if framework == "tf":
        signature = inspect.signature(A_.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(A_.forward )  # PyTorch models
    else:
        signature = inspect.signature(A_.__call__ )  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    return [p for p in signature.parameters if "label" in p]
def a__ ( d , parent_key = "" , delimiter = "." ):
    """Flatten a nested mapping into a single-level dict whose keys join the
    nesting path with `delimiter` (e.g. {"a": {"b": 1}} -> {"a.b": 1}).

    Fixes: the original declared all three parameters with the same name (`A_`,
    a SyntaxError), recursed through the undefined name `flatten_dict`, and
    tested `isinstance(A_, A_)` instead of checking for a nested mapping.
    """

    def _flatten(sub , parent , sep ):
        for k, v in sub.items():
            key = str(parent ) + sep + str(k ) if parent else k
            if v and isinstance(v , MutableMapping ):
                # Non-empty nested mapping: descend with the extended key path.
                yield from _flatten(v , key , sep )
            else:
                yield key, v

    return dict(_flatten(d , parent_key , delimiter ) )
@contextmanager
def a__ ( working_dir , use_temp_dir = False ):
    """Yield `working_dir`, or a fresh temporary directory (removed on exit)
    when `use_temp_dir` is True.

    Fix: the original declared both parameters with the same name (`A_`),
    which is a SyntaxError.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def a__ ( array , axes = None ):
    """Framework-agnostic transpose for numpy / torch / TF / JAX tensors.

    Fix: the original declared both parameters with the same name (`A_`),
    which is a SyntaxError.
    """
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        # torch: .T for the default full reversal, permute for explicit axes.
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"Type not supported for transpose: {type(array )}." )
def a__ ( array , newshape ):
    """Framework-agnostic reshape for numpy / torch / TF / JAX tensors.

    Fix: the original declared both parameters with the same name (`A_`),
    which is a SyntaxError.
    """
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        # torch's reshape takes the dimensions unpacked.
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"Type not supported for reshape: {type(array )}." )
def a__ ( array , axis = None ):
    """Framework-agnostic squeeze for numpy / torch / TF / JAX tensors.

    Fix: the original declared both parameters with the same name (`A_`),
    which is a SyntaxError.
    """
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        # torch uses `dim=` and rejects dim=None, hence the branch.
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array )}." )
def a__ ( array , axis ):
    """Framework-agnostic expand_dims for numpy / torch / TF / JAX tensors.

    Fix: the original declared both parameters with the same name (`A_`),
    which is a SyntaxError.
    """
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array )}." )
def a__ ( A_ ):
    """Return the total number of elements of `A_`, framework-agnostically.

    Fix: the fallback error message said "expand_dims" (evidently copy-pasted
    from the neighbouring function); it now names this operation.
    """
    if is_numpy_array(A_ ):
        return np.size(A_ )
    elif is_torch_tensor(A_ ):
        return A_.numel()
    elif is_tf_tensor(A_ ):
        import tensorflow as tf

        return tf.size(A_ )
    elif is_jax_tensor(A_ ):
        return A_.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(A_ )}." )
def a__ ( auto_map , repo_id ):
    """Prefix every entry of an `auto_map` with "<repo_id>--" unless it already
    carries a repo prefix; mutates and returns `auto_map`.

    Fix: the original declared both parameters with the same name (`A_`, a
    SyntaxError) and assigned the rewritten values to a throwaway local instead
    of back into the map.
    """
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def a__ ( A_ ):
    """Infer the framework ("tf" / "pt" / "flax") of a model class by walking its
    MRO and inspecting each base's module and name.

    Fix: the original raised TypeError from the `else` of the if/elif chain, so
    the very first base that matched no framework aborted the walk (e.g. the
    concrete model class itself); the raise now lives in the loop's `else`
    clause and only fires when no base matched.
    """
    for base_class in inspect.getmro(A_ ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {A_}." )
| 88 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase__ ( _a):
    """Tokenize one dataset example and record the characters-per-token ratio.

    Uses the module-level `tokenizer` built from the CLI arguments.

    Fixes: the original never bound the `output` dict it returned (every
    assignment targeted a throwaway local).
    NOTE(review): the original passed `truncation=_a` — the example itself — as
    the truncation flag; upstream disables truncation here. Confirm against the
    training setup.
    """
    output = {}
    output["input_ids"] = tokenizer(_a["content"] , truncation=False)["input_ids"]
    output["ratio_char_token"] = len(_a["content"]) / len(output["input_ids"])
    return output
# Pre-tokenization driver: parse args, load + tokenize the dataset in parallel,
# then push the result to the Hub, timing each phase.
# NOTE(review): every assignment below targets the single name `a_`, while later
# lines read `parser`, `args`, `tokenizer`, `t_start`, and `ds` — none of which
# are ever bound. The variable names were clearly mangled; restore them before
# running this script.
a_ = HfArgumentParser(PretokenizationArguments)
a_ = parser.parse_args()
if args.num_workers is None:
    # Default to one worker process per CPU core.
    a_ = multiprocessing.cpu_count()
a_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a_ = time.time()
a_ = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a_ = time.time()
# Tokenize in parallel, dropping all metadata columns from the output dataset.
a_ = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
    """Configuration class for Open-Llama models: stores the architecture
    hyper-parameters, validates `rope_scaling`, and defers token-id /
    embedding-tying bookkeeping to the PretrainedConfig superclass.

    Fix: the original `__init__` declared every parameter with the same mangled
    name (`_UpperCAmelCase`), a SyntaxError; names are restored from the body's
    reads and the visible default order.
    """

    lowerCAmelCase : List[str] = 'open-llama'

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1E-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE(review): the misspelled kwargs key 'use_memorry_efficient_attention'
        # matches the visible original — presumably kept for backward
        # compatibility with old configs; confirm before "fixing" the spelling.
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        # NOTE(review): the validator below is (mangled-)named `__lowercase`, so
        # this attribute lookup cannot resolve as written — upstream the method
        # is `_rope_scaling_validation`; confirm and restore the method name.
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def __lowercase ( self ):
        """Validate `rope_scaling`: either None or a 2-key dict
        {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 89 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def lowerCamelCase__ ( _a):
    """Build the DetrConfig (and a panoptic flag) for the checkpoint named `_a`.

    Fixes: the original never bound the `config` / `is_panoptic` names it
    returned (assignments targeted a throwaway local) and keyed the id2label
    comprehension on `int(_a)` — the model name — instead of each label id.
    NOTE(review): `use_timm_backbone=False` follows the upstream conversion
    script (the original passed the model name here); confirm.
    """
    # initialize config
    if "resnet-50" in _a:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in _a:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in _a
    config.num_labels = 250 if is_panoptic else 91

    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset") , "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def lowerCamelCase__ ( _a):
    """Return the (old, new) key pairs that map original DETR checkpoint names to
    the Transformers naming scheme: ResNet stem + stages, transformer
    encoder/decoder layers, and the projection / query / head weights.

    Fix: the original body read an undefined name `config`; it is now bound to
    the function's parameter. The repetitive append blocks are generated by
    loops but the emitted pairs (and their order) are unchanged.
    """
    config = _a
    rename_keys = []

    # stem: first convolution plus its batch-norm statistics
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    for stat in ("weight", "bias", "running_mean", "running_var"):
        rename_keys.append((f"backbone.0.body.bn1.{stat}", f"backbone.conv_encoder.model.embedder.embedder.normalization.{stat}"))

    # stages: a downsample shortcut on each stage's first layer, then 3 conv/bn pairs per layer
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            src = f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}"
            dst = f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}"
            if layer_idx == 0:
                rename_keys.append((f"{src}.downsample.0.weight", f"{dst}.shortcut.convolution.weight"))
                for stat in ("weight", "bias", "running_mean", "running_var"):
                    rename_keys.append((f"{src}.downsample.1.{stat}", f"{dst}.shortcut.normalization.{stat}"))
            for i in range(3):
                rename_keys.append((f"{src}.conv{i+1}.weight", f"{dst}.layer.{i}.convolution.weight"))
                for stat in ("weight", "bias", "running_mean", "running_var"):
                    rename_keys.append((f"{src}.bn{i+1}.{stat}", f"{dst}.layer.{i}.normalization.{stat}"))

    # per-layer transformer renames: output projections, FFNs, and layernorms
    encoder_map = [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "final_layer_norm.weight"),
        ("norm2.bias", "final_layer_norm.bias"),
    ]
    decoder_map = [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("multihead_attn.out_proj.weight", "encoder_attn.out_proj.weight"),
        ("multihead_attn.out_proj.bias", "encoder_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "encoder_attn_layer_norm.weight"),
        ("norm2.bias", "encoder_attn_layer_norm.bias"),
        ("norm3.weight", "final_layer_norm.weight"),
        ("norm3.bias", "final_layer_norm.bias"),
    ]
    for i in range(config.encoder_layers):
        for old, new in encoder_map:
            rename_keys.append((f"transformer.encoder.layers.{i}.{old}", f"encoder.layers.{i}.{new}"))
        for old, new in decoder_map:
            rename_keys.append((f"transformer.decoder.layers.{i}.{old}", f"decoder.layers.{i}.{new}"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ])
    return rename_keys
def lowerCamelCase__(_a, _b, _c):
    """Rename one state-dict entry in place.

    Moves the value stored under key ``_b`` in the dict ``_a`` to the new
    key ``_c``. ``pop`` removes the old key so the entry is not duplicated.

    Fixes: the obfuscated original declared the parameter ``_a`` three times
    (a SyntaxError) and bound the popped value to a throwaway local instead
    of writing it back under the new key.
    """
    val = _a.pop(_b)
    _a[_c] = val
def lowerCamelCase__ ( _a , _a=False):
    """Split each layer's fused q/k/v input-projection matrix (PyTorch
    MultiHeadAttention stores query/key/value as one matrix + bias) into
    separate query / key / value entries.

    NOTE(review): this block is mechanically obfuscated and not runnable as
    written — the parameter name ``_a`` is declared twice (a SyntaxError),
    ``state_dict`` / ``is_panoptic`` / ``prefix`` / ``in_proj_weight`` /
    ``in_proj_bias`` are read without being defined, and the popped slices
    are only bound to a throwaway local instead of being written back into
    the state dict under the new q/k/v keys. Restore from the upstream DETR
    conversion script before use.
    """
    SCREAMING_SNAKE_CASE : Optional[Any] = ""
    if is_panoptic:
        SCREAMING_SNAKE_CASE : Optional[int] = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        # rows [0:256) -> query, [256:512) -> key, last 256 -> value
        SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :]
        SCREAMING_SNAKE_CASE : int = in_proj_bias[:256]
        SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[256:512, :]
        SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[256:512]
        SCREAMING_SNAKE_CASE : str = in_proj_weight[-256:, :]
        SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        SCREAMING_SNAKE_CASE : str = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :]
        SCREAMING_SNAKE_CASE : Dict = in_proj_bias[:256]
        SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[256:512, :]
        SCREAMING_SNAKE_CASE : Any = in_proj_bias[256:512]
        SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[-256:, :]
        SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        SCREAMING_SNAKE_CASE : int = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        SCREAMING_SNAKE_CASE : Tuple = in_proj_weight_cross_attn[:256, :]
        SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias_cross_attn[:256]
        SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
        SCREAMING_SNAKE_CASE : Dict = in_proj_bias_cross_attn[256:512]
        SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight_cross_attn[-256:, :]
        SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias_cross_attn[-256:]
def lowerCamelCase__():
    """Download the standard COCO val2017 image used to smoke-test DETR.

    Returns the decoded PIL image; requires network access.

    Fixes: the obfuscated original passed the undefined name ``_a`` as both
    the URL argument and the ``stream`` flag of ``requests.get``.
    """
    SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL decode directly from the response's raw stream.
    return Image.open(requests.get(SCREAMING_SNAKE_CASE, stream=True).raw)
@torch.no_grad()
def lowerCamelCase__ ( _a , _a=None , _a=False):
    """Convert an original facebookresearch/detr torch-hub checkpoint to the
    HuggingFace DETR format, verify outputs on a test image, and optionally
    save / push the converted model.

    NOTE(review): mechanically obfuscated — the parameter ``_a`` is declared
    three times (a SyntaxError); ``model_name`` / ``is_panoptic`` /
    ``state_dict`` / ``pytorch_dump_folder_path`` / ``push_to_hub`` and the
    helpers ``rename_key`` / ``read_in_q_k_v`` / ``create_rename_keys`` are
    referenced but not bound under those names in this file. Restore from
    the upstream conversion script before running.
    """
    SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = get_detr_config(_a)
    # load original model from torch hub
    SCREAMING_SNAKE_CASE : Union[str, Any] = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=_a).eval()
    SCREAMING_SNAKE_CASE : Tuple = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(_a):
        if is_panoptic:
            SCREAMING_SNAKE_CASE : List[str] = "detr." + src
        rename_key(_a , _a , _a)
    # query, key and value matrices need special treatment
    read_in_q_k_v(_a , is_panoptic=_a)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    SCREAMING_SNAKE_CASE : List[Any] = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(_a)
                SCREAMING_SNAKE_CASE : Union[str, Any] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(_a)
                SCREAMING_SNAKE_CASE : Optional[int] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                # bbox_attention / mask_head keys keep their original names
                continue
            else:
                SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(_a)
                SCREAMING_SNAKE_CASE : List[Any] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                SCREAMING_SNAKE_CASE : Any = state_dict.pop(_a)
                SCREAMING_SNAKE_CASE : Any = val
    # finally, create HuggingFace model and load state dict
    SCREAMING_SNAKE_CASE : int = DetrForSegmentation(_a) if is_panoptic else DetrForObjectDetection(_a)
    model.load_state_dict(_a)
    model.eval()
    # verify our conversion on an image
    SCREAMING_SNAKE_CASE : int = "coco_panoptic" if is_panoptic else "coco_detection"
    SCREAMING_SNAKE_CASE : Optional[int] = DetrImageProcessor(format=_a)
    SCREAMING_SNAKE_CASE : List[str] = processor(images=prepare_img() , return_tensors="pt")
    SCREAMING_SNAKE_CASE : Any = encoding["pixel_values"]
    SCREAMING_SNAKE_CASE : Optional[Any] = detr(_a)
    SCREAMING_SNAKE_CASE : Any = model(_a)
    # converted model must reproduce the original's logits/boxes closely
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3)
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(_a).mkdir(exist_ok=_a)
        model.save_pretrained(_a)
        processor.save_pretrained(_a)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # Fix: the obfuscated original bound the parser to ``a_`` but then used
    # the undefined names ``parser`` and ``args``; use consistent names.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='detr-resnet-50',
        type=str,
        choices=['detr-resnet-50', 'detr-resnet-101'],
        help='Name of the DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
    # NOTE(review): ``convert_detr_checkpoint`` is not bound under that name
    # in this file (the conversion function above is ``lowerCamelCase__``);
    # verify the intended target before running as a script.
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> str:
    """Return the two's-complement binary string of a non-positive integer.

    The result is prefixed with ``0b`` and is one bit wider than the minimal
    binary representation of ``abs(UpperCamelCase__)``.

    Raises:
        ValueError: if the input is positive.
    """
    if UpperCamelCase__ > 0:
        raise ValueError('input must be a negative integer')
    # Bit width of the magnitude, i.e. bin(x) without its '-0b'/'0b' prefix.
    width = len(bin(UpperCamelCase__)[3:])
    # abs(x) - 2**width is negative, so bin() yields '-0b…'; strip the prefix
    # to obtain the low bits of the complement.
    low_bits = bin(abs(UpperCamelCase__) - (1 << width))[3:]
    if UpperCamelCase__ < 0:
        # Leading sign bit, zero padding, then the complement's low bits.
        body = '1' + '0' * (width - len(low_bits)) + low_bits
    else:
        # Zero is its own two's complement.
        body = '0'
    return "0b" + body
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 90 |
import os
def lowerCamelCase__():
    """Project Euler 22: sum of (position * alphabetical score) over the
    sorted names read from ``p022_names.txt`` next to this script.

    A name's score is the sum of its letters' positions in the alphabet
    ('A' -> 1 ... 'Z' -> 26, via ``ord(letter) - 64``).

    Fixes: the obfuscated original referenced the undefined name ``_a``
    where ``__file__``, ``names`` and ``letter`` were intended, and bound
    every local to the same throwaway name.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace("\"", "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0  # reset for the next name
    return total_score
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this file — the function
    # above is bound to ``lowerCamelCase__``; verify before running.
    print(solution())
"""simple docstring"""
def _A(__a, __b) -> list:
    """Return the ``__a``-th (0-indexed) lexicographic permutation of
    ``range(__b)`` using the factorial number system.

    Fixes: the obfuscated original declared ``__a`` twice (a SyntaxError),
    referenced the undefined names ``k``/``n``, and annotated the return as
    ``Optional[int]`` (undefined here, and the function returns a list).

    Args:
        __a: permutation index ``k``, ``0 <= k < n!``.
        __b: number of elements ``n``.
    """
    factorials = [1]
    for i in range(2, __b):
        factorials.append(factorials[-1] * i)
    assert 0 <= __a < factorials[-1] * __b, "k out of bounds"
    permutation = []
    elements = list(range(__b))
    # Find permutation: peel one factorial "digit" per output position.
    k = __a
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 91 |
from collections.abc import Callable
import numpy as np
def lowerCamelCase__(ode_func, ya, xa, step_size, x_end):
    """Integrate ``y' = ode_func(x, y)`` with the explicit (forward) Euler
    method from ``xa`` to ``x_end`` starting at ``y(xa) = ya``.

    Returns a numpy array of the ``n + 1`` successive y values, where
    ``n = ceil((x_end - xa) / step_size)``.

    Fixes: the obfuscated original declared ``_a`` five times (a
    SyntaxError) and never stored ``ya`` into ``y[0]`` nor the update into
    ``y[k + 1]``.
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Forward Euler update: y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
UpperCamelCase__ = logging.getLogger(__name__)  # module-level logger
@dataclass
class a__ :
    # NOTE(review): all three annotations reuse the name ``_a`` — in a
    # dataclass later annotations overwrite earlier ones, leaving a single
    # field. Presumably these were guid / words / labels (an input example);
    # restore distinct field names before use.
    _a : str
    _a : List[str]
    _a : Optional[List[str]]
@dataclass
class a__ :
    # NOTE(review): duplicate ``_a`` annotations collapse into one field —
    # presumably input_ids / attention_mask / token_type_ids / label_ids
    # (the model features); restore distinct field names before use.
    _a : List[int]
    _a : List[int]
    _a : Optional[List[int]] = None
    _a : Optional[List[int]] = None
class a__ ( snake_case__ ):
    # NOTE(review): looks like a train/dev/test split enum, but the base
    # class ``snake_case__`` is not defined in this file, and the three
    # members all reuse the name ``_a`` so only the last ("test") survives.
    _a : Optional[int] = """train"""
    _a : int = """dev"""
    _a : Tuple = """test"""
class a__ :
    # NOTE(review): mechanically obfuscated token-classification task base
    # class. All three static methods share the name ``__SCREAMING_SNAKE_CASE``
    # (only the last binding survives), and each declares the parameter ``_A``
    # multiple times — a SyntaxError. The bodies also read names
    # (``examples``, ``tokenizer``, ``max_seq_length``, ...) that the mangled
    # signatures no longer bind. Restore from the upstream file before use.
    @staticmethod
    def __SCREAMING_SNAKE_CASE( _A , _A ):
        """Read examples from a data file; to be implemented by subclasses."""
        raise NotImplementedError
    @staticmethod
    def __SCREAMING_SNAKE_CASE( _A ):
        """Return the list of labels for the task; to be implemented by subclasses."""
        raise NotImplementedError
    @staticmethod
    def __SCREAMING_SNAKE_CASE( _A , _A , _A , _A , _A=False , _A="[CLS]" , _A=1 , _A="[SEP]" , _A=False , _A=False , _A=0 , _A=0 , _A=-1_0_0 , _A=0 , _A=True , ):
        """Tokenize each example, align labels to sub-word tokens, add special
        tokens, and pad/truncate everything to a fixed sequence length."""
        __lowerCAmelCase = {label: i for i, label in enumerate(_A )}
        __lowerCAmelCase = []
        for ex_index, example in enumerate(_A ):
            if ex_index % 1_0_0_0_0 == 0:
                logger.info("Writing example %d of %d" , _A , len(_A ) )
            __lowerCAmelCase = []
            __lowerCAmelCase = []
            for word, label in zip(example.words , example.labels ):
                __lowerCAmelCase = tokenizer.tokenize(_A )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(_A ) > 0:
                    tokens.extend(_A )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_A ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            __lowerCAmelCase = tokenizer.num_special_tokens_to_add()
            if len(_A ) > max_seq_length - special_tokens_count:
                __lowerCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
                __lowerCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            __lowerCAmelCase = [sequence_a_segment_id] * len(_A )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                __lowerCAmelCase = [cls_token] + tokens
                __lowerCAmelCase = [pad_token_label_id] + label_ids
                __lowerCAmelCase = [cls_token_segment_id] + segment_ids
            __lowerCAmelCase = tokenizer.convert_tokens_to_ids(_A )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            __lowerCAmelCase = [1 if mask_padding_with_zero else 0] * len(_A )
            # Zero-pad up to the sequence length.
            __lowerCAmelCase = max_seq_length - len(_A )
            if pad_on_left:
                __lowerCAmelCase = ([pad_token] * padding_length) + input_ids
                __lowerCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                __lowerCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
                __lowerCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(_A ) == max_seq_length
            assert len(_A ) == max_seq_length
            assert len(_A ) == max_seq_length
            assert len(_A ) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***" )
                logger.info("guid: %s" , example.guid )
                logger.info("tokens: %s" , " ".join([str(_A ) for x in tokens] ) )
                logger.info("input_ids: %s" , " ".join([str(_A ) for x in input_ids] ) )
                logger.info("input_mask: %s" , " ".join([str(_A ) for x in input_mask] ) )
                logger.info("segment_ids: %s" , " ".join([str(_A ) for x in segment_ids] ) )
                logger.info("label_ids: %s" , " ".join([str(_A ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                __lowerCAmelCase = None
            features.append(
                InputFeatures(
                    input_ids=_A , attention_mask=_A , token_type_ids=_A , label_ids=_A ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class a__ ( snake_case__ ):
    # NOTE(review): PyTorch dataset of cached token-classification features.
    # The base ``snake_case__`` is undefined here (presumably torch Dataset),
    # both class attributes reuse the name ``_a``, and ``__init__`` declares
    # ``_A`` eight times — a SyntaxError. ``__getitem__`` also indexes with
    # the undefined name ``i``. Restore from the upstream file before use.
    _a : List[InputFeatures]
    _a : int = nn.CrossEntropyLoss().ignore_index
    def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ):
        """Load features from the on-disk cache, or build and cache them."""
        __lowerCAmelCase = os.path.join(
            _A , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(_A ) ) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        __lowerCAmelCase = cached_features_file + ".lock"
        with FileLock(_A ):
            if os.path.exists(_A ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                __lowerCAmelCase = torch.load(_A )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                __lowerCAmelCase = token_classification_task.read_examples_from_file(_A , _A )
                # TODO clean up all this to leverage built-in features of tokenizers
                __lowerCAmelCase = token_classification_task.convert_examples_to_features(
                    _A , _A , _A , _A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info(f"""Saving features into cached file {cached_features_file}""" )
                torch.save(self.features , _A )
    def __len__( self ):
        """Number of cached feature records."""
        return len(self.features )
    def __getitem__( self , _A ):
        """Return one feature record by index."""
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class a__ :
    # NOTE(review): TensorFlow counterpart of the dataset above, exposing the
    # features through ``tf.data.Dataset.from_generator``. ``__init__``
    # declares ``_A`` eight times — a SyntaxError — and reads names the
    # mangled signature no longer binds; ``__getitem__`` indexes with the
    # undefined name ``i``. Restore from the upstream file before use.
    _a : List[InputFeatures]
    _a : int = -1_0_0
    def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ):
        """Build the feature list and wrap it in a tf.data generator dataset."""
        __lowerCAmelCase = token_classification_task.read_examples_from_file(_A , _A )
        # TODO clean up all this to leverage built-in features of tokenizers
        __lowerCAmelCase = token_classification_task.convert_examples_to_features(
            _A , _A , _A , _A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
        def gen():
            # Yield (inputs, labels) pairs; token_type_ids only when present.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )
        if "token_type_ids" not in tokenizer.model_input_names:
            __lowerCAmelCase = tf.data.Dataset.from_generator(
                _A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
                    {"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
        else:
            __lowerCAmelCase = tf.data.Dataset.from_generator(
                _A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
                    {
                        "input_ids": tf.TensorShape([None] ),
                        "attention_mask": tf.TensorShape([None] ),
                        "token_type_ids": tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )
    def __SCREAMING_SNAKE_CASE( self ):
        """Attach the known cardinality to the dataset and return it."""
        __lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset
    def __len__( self ):
        """Number of feature records."""
        return len(self.features )
    def __getitem__( self , _A ):
        """Return one feature record by index."""
        return self.features[i]
| 92 |
def lowerCamelCase__(_a, _b):
    """Logical OR gate: return 1 if either input is 1, otherwise 0.

    Fixes: the obfuscated original declared ``_a`` twice (a SyntaxError) and
    counted the undefined name ``input_a`` twice instead of both inputs.
    """
    return int((_a, _b).count(1) != 0)
def lowerCamelCase__ ( ):
    """Exercise the OR-gate truth table.

    NOTE(review): this redefinition shadows the gate function above (both
    are bound to ``lowerCamelCase__``) and calls ``or_gate``, which is not
    defined in this file — broken by mechanical renaming.
    """
    assert or_gate(0 , 0) == 0
    assert or_gate(0 , 1) == 1
    assert or_gate(1 , 0) == 1
    assert or_gate(1 , 1) == 1
if __name__ == "__main__":
    # NOTE(review): ``or_gate`` is not defined in this file — the gate above
    # is bound to ``lowerCamelCase__``; verify before running as a script.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
'''simple docstring'''
import unittest
from transformers import DonutProcessor
_lowercase : str = "naver-clova-ix/donut-base"  # checkpoint id; NOTE(review): never read below — presumably meant for from_pretrained
class lowerCAmelCase__ ( unittest.TestCase ):
    # NOTE(review): obfuscation damage — the setup method references the
    # undefined ``__SCREAMING_SNAKE_CASE`` (presumably the module constant
    # above) and binds the processor to a local instead of ``self.processor``,
    # which the second test reads; both methods also share one name.
    def _snake_case ( self ):
        """Load the Donut processor used by the tests."""
        lowercase_ : List[str] = DonutProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
    def _snake_case ( self ):
        """token2json must parse the tagged sequence back into the dict."""
        lowercase_ : int = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        lowercase_ : Optional[int] = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        lowercase_ : str = self.processor.tokenajson(__SCREAMING_SNAKE_CASE )
        self.assertDictEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 93 |
a_ = 8.314_4598  # universal gas constant R, J/(mol*K)
def lowerCamelCase__(_a, _b):
    """Root-mean-square speed of gas molecules: v_rms = sqrt(3*R*T / M).

    Args:
        _a: temperature in kelvin (must be >= 0).
        _b: molar mass in kg/mol (must be > 0).

    Fixes: the obfuscated original declared ``_a`` twice (a SyntaxError) and
    referenced the undefined names ``temperature`` / ``molar_mass`` /
    ``UNIVERSAL_GAS_CONSTANT``.
    """
    if _a < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if _b <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * a_ * _a / _b) ** 0.5
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # example
    # NOTE(review): obfuscation damage — ``a_`` is rebound three times so the
    # earlier values are lost, and ``rms_speed_of_molecule`` / ``temperature``
    # / ``molar_mass`` / ``vrms`` are undefined; restore distinct names
    # before running as a script.
    a_ = 300
    a_ = 28
    a_ = rms_speed_of_molecule(temperature, molar_mass)
    print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
import os
from distutils.util import strtobool
def __lowerCamelCase(env_keys, default):
    """Return the first non-negative integer found among the given
    environment variables, or ``default`` if none is set.

    Fixes: the obfuscated original declared the parameter twice (a
    SyntaxError) and read the wrong name from the environment instead of
    each key ``e``.
    """
    for e in env_keys:
        # Missing variables map to -1, which the >= 0 guard rejects.
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def __lowerCamelCase(key, default=False):
    """Read a boolean flag from the environment variable ``key``; when the
    variable is unset, fall back to ``str(default)``.

    Fixes: the obfuscated original declared the parameter twice (a
    SyntaxError); distinct names restore the key/default split.
    """
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def __lowerCamelCase(key, default="no"):
    """Return the raw string value of the environment variable ``key``,
    or ``str(default)`` when it is unset.

    Fixes: the obfuscated original declared the parameter twice (a
    SyntaxError); distinct names restore the key/default split.
    """
    value = os.environ.get(key, str(default))
    return value
| 94 |
# Adjacency list of the demo graph used by the BFS helpers below.
# NOTE(review): the __main__ block refers to this dict as ``demo_graph``,
# but obfuscation renamed it to ``a_`` — verify before running as a script.
a_ = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def lowerCamelCase__(graph, start, goal):
    """Breadth-first search: return one shortest path from ``start`` to
    ``goal`` as a list of nodes, or ``[]`` if no path exists.

    Fixes: the obfuscated original declared ``_a`` three times (a
    SyntaxError) and read the undefined names ``graph``/``start``/``goal``.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def lowerCamelCase__(graph, start, target):
    """Breadth-first search: return the length (edge count) of the shortest
    path from ``start`` to ``target``, or -1 when either node is missing or
    the graph is empty.

    Fixes: the obfuscated original declared ``_a`` three times (a
    SyntaxError), read undefined names, and initialised the visited set from
    the characters of the start node (``set(start)``) instead of ``{start}``.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # NOTE(review): ``bfs_shortest_path`` / ``bfs_shortest_path_distance`` /
    # ``demo_graph`` are not defined in this file — obfuscation bound both
    # functions to ``lowerCamelCase__`` (the second shadows the first) and
    # the graph to ``a_``; restore the names before running as a script.
    print(bfs_shortest_path(demo_graph, 'G', 'D'))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
def _A(numerator=3, denominator=7, limit=1_000_000):
    """Project Euler 71: numerator of the fraction immediately to the left
    of ``numerator/denominator`` in the ordered set of reduced proper
    fractions with denominators up to ``limit``.

    Fixes: the obfuscated original declared every parameter as
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and bound the running maxima to
    throwaway names.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        # Exclude the target fraction itself when it is exactly representable.
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Cross-multiplied comparison: is current fraction > current maximum?
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this file — the function
    # above is bound to ``_A``; verify before running as a script.
    print(solution(numerator=3, denominator=7, limit=1000000))
| 95 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
    '''Integration test: google/mt5-small must reproduce a known LM loss.'''
    # NOTE(review): obfuscation damage — the model/tokenizer outputs are
    # bound to throwaway locals, while ``model(a , labels=a )``,
    # ``mtf_score`` and ``EXPECTED_SCORE`` read names that are never
    # defined; restore distinct local names before running.
    @slow
    def __UpperCamelCase ( self : str ) -> List[str]:
        """Compare the mean negative log-likelihood against a recorded value."""
        SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
        SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("google/mt5-small" )
        SCREAMING_SNAKE_CASE : Tuple = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        SCREAMING_SNAKE_CASE : str = model(a , labels=a ).loss
        SCREAMING_SNAKE_CASE : Any = -tf.math.reduce_mean(a ).numpy()
        SCREAMING_SNAKE_CASE : Union[str, Any] = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    'kwargs, expected' , [
        ({'num_shards': 0, 'max_num_jobs': 1}, []),
        ({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
        ({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
        ({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
        ({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
    ] , )
def _snake_case(kwargs, expected):
    """_distribute_shards must split shard indices into the expected ranges.

    Fixes: the obfuscated original declared the parameter ``lowercase__``
    twice (a SyntaxError) and used the undefined ``lowercase__`` as the
    lower bound in the one-shard-per-job case (``range(i, i + 1)``).
    """
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    'gen_kwargs, max_num_jobs, expected' , [
        ({'foo': 0}, 10, [{'foo': 0}]),
        ({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
        ({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
        ({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
        ({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
    ] , )
def _snake_case(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs must partition gen_kwargs across jobs as expected.

    Fixes: the obfuscated original declared ``lowercase__`` three times
    (a SyntaxError) and passed it twice to ``_split_gen_kwargs``.
    """
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    'gen_kwargs, expected' , [
        ({'foo': 0}, 1),
        ({'shards': [0]}, 1),
        ({'shards': [0, 1, 2, 3]}, 4),
        ({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
        ({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
        ({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
    ] , )
def _snake_case(gen_kwargs, expected):
    """_number_of_shards_in_gen_kwargs must count shards, or raise when two
    list arguments of different lengths are mixed.

    Fixes: the obfuscated original declared ``lowercase__`` twice
    (a SyntaxError).
    """
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
from math import factorial
def lowerCamelCase__(successes, trials, prob):
    """Binomial probability mass: P(X = successes) for ``trials`` Bernoulli
    trials with per-trial success probability ``prob``.

    Raises:
        ValueError: on invalid counts, non-integer counts, or prob outside (0, 1).

    Fixes: the obfuscated original declared ``_a`` three times (a
    SyntaxError) and referenced the undefined names
    ``successes``/``trials``/``prob`` via mangled locals.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    # NOTE(review): ``binomial_distribution`` is not defined in this file —
    # the function above is bound to ``lowerCamelCase__``; verify before
    # running as a script.
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
# NOTE(review): the next assignment rebinds ``__snake_case``, clobbering the
# logger above — obfuscation collapsed two distinct module names (logger and
# the pretrained-config archive map) into one.
__snake_case = {
    '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class lowercase ( A__ , A__ ):
    '''ResNet model configuration (architecture hyper-parameters).'''
    # NOTE(review): obfuscation damage — the base class name ``A__`` is
    # undefined (and listed twice), ``_a`` is assigned twice so the first
    # value ('resnet') is lost, and ``__init__`` declares ``UpperCamelCase_``
    # ten times (a SyntaxError) while the body reads the original parameter
    # names (``layer_type``, ``num_channels``, ...). Restore before use.
    _a = 'resnet'
    _a = ['basic', 'bottleneck']
    def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=[256, 512, 1024, 2048] , UpperCamelCase_=[3, 4, 6, 3] , UpperCamelCase_="bottleneck" , UpperCamelCase_="relu" , UpperCamelCase_=False , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ):
        '''Validate the layer type and store all architecture attributes.'''
        super().__init__(**UpperCamelCase_ )
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
        UpperCamelCase__ :Optional[Any] = num_channels
        UpperCamelCase__ :int = embedding_size
        UpperCamelCase__ :Dict = hidden_sizes
        UpperCamelCase__ :int = depths
        UpperCamelCase__ :int = layer_type
        UpperCamelCase__ :List[Any] = hidden_act
        UpperCamelCase__ :Optional[int] = downsample_in_first_stage
        UpperCamelCase__ :Tuple = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
        UpperCamelCase__ , UpperCamelCase__ :Tuple = get_aligned_output_features_output_indices(
            out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
class lowercase ( A__ ):
    '''ONNX export configuration for the ResNet model.'''
    # NOTE(review): the base ``A__`` is undefined here, this class reuses the
    # name ``lowercase`` (shadowing the config class above), and both
    # properties share one name so the second (the tolerance) shadows the
    # first (the input axes). Restore distinct names before use.
    _a = version.parse('1.11' )
    @property
    def lowerCAmelCase__ ( self ):
        '''Named dynamic axes of the model's pixel_values input.'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def lowerCAmelCase__ ( self ):
        '''Absolute tolerance used when validating the exported model.'''
        return 1e-3
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCamelCase ( __A ):
    '''Fast tokenizer paired with the custom slow tokenizer.'''
    # NOTE(review): the base class ``__A`` is undefined in this file
    # (presumably BertTokenizerFast, imported above), and the attribute name
    # was mangled — presumably ``slow_tokenizer_class``; verify upstream.
    lowerCamelCase__ =CustomTokenizer
    pass
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case ( __UpperCAmelCase ):
    '''Tests for FeaturesManager.determine_framework (pt/tf selection).'''
    # NOTE(review): the base class ``__UpperCAmelCase`` is undefined here
    # (presumably unittest.TestCase). The fixture methods below each save a
    # checkpoint so determine_framework can probe local directories.
    def __lowerCAmelCase ( self : Optional[int] ):
        """Set up the tiny test model id and the framework tags."""
        UpperCAmelCase__ = SMALL_MODEL_IDENTIFIER
        UpperCAmelCase__ = 'pt'
        UpperCAmelCase__ = 'tf'
    def __lowerCAmelCase ( self : str ,lowerCamelCase__ : int ):
        """Save a PyTorch checkpoint of the test model into the given dir."""
        UpperCAmelCase__ = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(lowerCamelCase__ )
    def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Any ):
        """Save a TensorFlow checkpoint of the test model into the given dir."""
        UpperCAmelCase__ = TFAutoModel.from_pretrained(self.test_model ,from_pt=lowerCamelCase__ )
        model_tf.save_pretrained(lowerCamelCase__ )
    def __lowerCAmelCase ( self : Tuple ):
        """An explicitly provided framework must always win."""
        UpperCAmelCase__ = 'mock_framework'
        # Framework provided - return whatever the user provides
        UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model ,lowerCamelCase__ )
        self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(lowerCamelCase__ )
            UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ ,lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(lowerCamelCase__ )
            UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ ,lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
    def __lowerCAmelCase ( self : int ):
        """Without an explicit framework, infer it from the checkpoint type."""
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(lowerCamelCase__ )
            UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ ,self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(lowerCamelCase__ )
            UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ )
            self.assertEqual(lowerCamelCase__ ,self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(lowerCamelCase__ ):
                UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ )
    def __lowerCAmelCase ( self : Any ):
        """Framework choice must follow which backends are importable."""
        UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
        with patch('transformers.onnx.features.is_tf_available' ,lowerCamelCase__ ):
            UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(lowerCamelCase__ ,self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
        with patch('transformers.onnx.features.is_torch_available' ,lowerCamelCase__ ):
            UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(lowerCamelCase__ ,self.framework_tf )
        # Both in environment -> use PyTorch
        UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
        UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
        with patch('transformers.onnx.features.is_tf_available' ,lowerCamelCase__ ), patch(
            'transformers.onnx.features.is_torch_available' ,lowerCamelCase__ ):
            UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(lowerCamelCase__ ,self.framework_pt )
        # Both not in environment -> raise error
        UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
        UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
        with patch('transformers.onnx.features.is_tf_available' ,lowerCamelCase__ ), patch(
            'transformers.onnx.features.is_torch_available' ,lowerCamelCase__ ):
            with self.assertRaises(lowerCamelCase__ ):
                UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model )
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a_ = logging.getLogger(__name__)
class _UpperCamelCase:
    """Ray-actor helper that lazily builds a RagRetriever inside a worker.

    Method names are fixed by the call sites in the distributed retriever
    below (`worker.create_rag_retriever.remote`, `worker.init_retrieval.remote`,
    `random_worker.retrieve.remote`).
    """

    def __init__(self):
        # Flipped to True once create_rag_retriever() has built the retriever.
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the underlying RagRetriever once; subsequent calls are no-ops."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,  # index loading is triggered explicitly via init_retrieval()
            )
            self.initialized = True

    def init_retrieval(self):
        """Load/initialize the retrieval index inside this worker process."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run retrieval and return (doc_ids, retrieved_doc_embeds)."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class _UpperCamelCase(__A):
    """RagRetriever variant that distributes retrieval over Ray actor workers."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py " )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Create a retriever inside every Ray actor.
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index
                    )
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self):
        """Initialize the index on every worker (or locally when no workers)."""
        logger.info("initializing retrieval" )
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve documents, delegating to a random Ray worker when distributed."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        """Delegate to the parent class' tokenizer loader."""
        return super(_UpperCamelCase, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build a distributed retriever from a pretrained RAG checkpoint."""
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-loading import structure: submodule name -> public names it exports.
# The `_LazyModule` call at the bottom reads `_import_structure`, so these
# entries must be stored in that dict (they were bound to a throwaway name).
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Any
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload stored in this node
        self.next = None  # next node, or None at the end of the list

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion and reversal.

    Class/method names are fixed by the test functions below, which call
    `LinkedList()`, `Node(...)`, `insert_nth`, `delete_nth`, `reverse`, etc.
    """

    def __init__(self) -> None:
        self.head = None  # first node, or None when the list is empty

    def __iter__(self) -> Any:
        """Yield the data of every node, head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes in the list (O(n))."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """Arrow-separated string of all node payloads."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at position *index* (0-based)."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range." )
        for i, item in enumerate(self):
            if i == index:
                return item
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at position *index*."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range." )
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert *data* at position *index* (0 <= index <= len(self))."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range" )
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data at position *index*."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range." )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # prev is the old tail: it becomes the new head
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercise the basic LinkedList operations with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise the LinkedList with mixed payloads (ints, strs, Nodes, None)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo driven by stdin; also runs the module's doctests.

    Must be named `main` — it is called by the `__main__` guard below.
    """
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-loading import structure: submodule -> public names. `_LazyModule`
# below reads `_import_structure`, so the entries must live in that dict
# (the original overwrote one throwaway name and left it undefined).
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a_ = get_logger(__name__)
class VerificationMode(enum.Enum):
    """How thoroughly downloaded data should be verified.

    Enum members need distinct names; the values are referenced by the
    `verification_mode='no_checks'` hint in the checksum error below.
    """

    ALL_CHECKS = 'all_checks'      # verify checksums and split sizes
    BASIC_CHECKS = 'basic_checks'  # verify split sizes only
    NO_CHECKS = 'no_checks'        # skip verification entirely
class ChecksumVerificationException(Exception):
    """Base error raised during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A file was downloaded that was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Fewer files were downloaded than expected."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum differs from the expected one."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    Raises ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile when the
    URL sets differ, and NonMatchingChecksumError when a checksum mismatches.
    The original signature used duplicate parameter names (a SyntaxError) and
    referenced these names without binding them.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error raised during verification of dataset splits."""


class UnexpectedSplits(SplitsVerificationException):
    """A split was recorded that was not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Fewer splits were recorded than expected."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A recorded split has a different number of examples than expected."""
def verify_splits(expected_splits, recorded_splits):
    """Check that recorded split names and sizes match the expected splits.

    *expected_splits*/*recorded_splits* map split name -> split info objects
    exposing `.num_examples`. Raises ExpectedMoreSplits / UnexpectedSplits /
    NonMatchingSplitsSizesError on mismatch.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for the file at *path*.

    The checksum is a SHA-256 hex digest computed in 1 MiB chunks, or None
    when *record_checksum* is False.
    """
    if record_checksum:
        # Local import: the module-level `from hashlib import shaaaa` is a
        # broken (obfuscated) name; hashlib.sha256 is the intended function.
        from hashlib import sha256

        m = sha256()
        with open(path, "rb") as f:
            # Read in 1 MiB chunks so arbitrarily large files fit in memory.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """True iff *dataset_size* is truthy and below ``config.IN_MEMORY_MAX_SIZE``.

    Returns False when either value is falsy (0/None), i.e. when the limit is
    disabled or the size is unknown.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the pretrained-config archive map. The original bound
# both to the same name, so the second assignment clobbered the logger.
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowercase(PretrainedConfig):
    """Configuration for the Decision Transformer model (GPT-2 style backbone).

    The base class is `PretrainedConfig`, imported at the top of the module
    (the original referenced an undefined name). Parameter names are restored
    from the attribute assignments in `__init__`; the original signature used
    one duplicated parameter name, which is a SyntaxError.
    """

    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 101 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into HF weights/config/vocab files.

    Function name and parameters are fixed by the call in the `__main__`
    block below; the original used a duplicated parameter name (SyntaxError).
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued entries: only plain values are JSON-serializable.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # Re-create word-piece markers: plain words get a `</w>` suffix, BPE
    # continuation pieces drop their `@@` marker (first 14 ids are specials).
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_config_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    # Build the CLI; the original bound the parser and the parsed args to the
    # same throwaway name, leaving `parser` and `args` undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
from math import log10  # the original `from math import logaa` is a broken name


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-based line number of the comma-separated
    pair ``a,x`` whose ``a**x`` is largest.

    Compares ``x * log10(a)`` instead of computing the huge powers directly.
    Must be named `solution` — the `__main__` guard below calls it.
    """
    largest: float = 0
    result = 0
    with open(os.path.join(os.path.dirname(data_file), data_file)) as f:
        for i, line in enumerate(f):
            a, x = list(map(int, line.split(',')))
            if x * log10(a) > largest:
                largest = x * log10(a)
                result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
| 102 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two equal-length points.

    Both inputs are validated via `_validate_point`, which raises ValueError
    for missing input and TypeError for non-numeric entries.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def lowerCamelCase__ ( _a):
if point:
if isinstance(_a , _a):
for item in point:
if not isinstance(_a , (int, float)):
SCREAMING_SNAKE_CASE : List[Any] = (
"Expected a list of numbers as input, found "
f"{type(_a).__name__}"
)
raise TypeError(_a)
else:
SCREAMING_SNAKE_CASE : List[Any] = f"Expected a list of numbers as input, found {type(_a).__name__}"
raise TypeError(_a)
else:
raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-expression variant of `manhattan_distance` with the same contract."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
A__ : int = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Pairwise cosine similarity between two embedding batches.

    Both inputs are L2-normalized along their last dim, then multiplied,
    giving an (n_images, n_texts) similarity matrix. Name and parameters
    are fixed by the call sites in the safety-checker class below; the
    original signature duplicated one parameter name (a SyntaxError).
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class __snake_case(UpperCamelCase_):
    """CLIP-based NSFW safety checker (Stable Diffusion style).

    The two class attributes were both bound to one name; restored to the
    PreTrainedModel conventions (`config_class`, `_no_split_modules`).
    """

    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        # Fixed-size concept banks; loaded from the checkpoint, never trained.
        self.concept_embeds = nn.Parameter(torch.ones(1_7, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(1_7), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Score each image against the concept banks; return (images, flags)."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]})
                    # Special concepts detected: tighten the main threshold.
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res['''bad_concepts''']) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """Vectorized (export-friendly) variant of forward()."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
| 103 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='vit_msn'
def __init__( self : str , a : Tuple=768 , a : Tuple=12 , a : Any=12 , a : int=3072 , a : List[Any]="gelu" , a : Dict=0.0 , a : int=0.0 , a : str=0.02 , a : List[str]=1e-06 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Union[str, Any]=3 , a : Tuple=True , **a : Dict , ) -> List[Any]:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = image_size
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : List[str] = qkv_bias | 76 | 0 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class lowercase_:
    """Wraps an LR scheduler so it only steps when its optimizers really stepped.

    Method names are restored from the scheduler calls they delegate to; the
    originals all shared one name and shadowed each other, and `__init__`
    duplicated one parameter name (a SyntaxError).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, '''total_steps'''):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough the scheduler API.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
| 104 |
import baseaa
def lowerCamelCase__ ( _a: str) -> bytes:
    """Encode the UTF-8 string ``_a`` with Ascii85 and return the encoded bytes."""
    # Local import: the module-level `import baseaa` refers to a nonexistent
    # module (digit-mangled `base64`).
    import base64

    return base64.a85encode(_a.encode("utf-8"))
def lowerCamelCase__ ( _a) -> str:
    """Decode Ascii85-encoded bytes/str ``_a`` and return the original UTF-8 string."""
    # Local import: the module-level `import baseaa` refers to a nonexistent
    # module (digit-mangled `base64`).
    import base64

    return base64.a85decode(_a).decode("utf-8")
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __UpperCamelCase :
    """Feature type for translations with a fixed set of languages per example:
    one translation string per language.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    # Internal type tag; excluded from __init__ and repr.
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # One pyarrow string field per (sorted) language code.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dict of string `Value`s."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class __UpperCamelCase :
    """Feature type for translations with a variable set of languages per
    example: each example maps language codes to one or more translations.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    # Internal type tag; excluded from __init__ and repr.
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Deduplicate and sort the declared languages once, at construction time.
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Normalize one example into parallel `language`/`translation` tuples.

        Raises:
            ValueError: if the example contains a language not in ``self.languages``.
        """
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"""Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."""
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the feature into a dict of string `Sequence`s."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 105 |
from datetime import datetime as dt
import os
from github import Github
a_ = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def lowerCamelCase__ ( ):
    """Close stale issues that were already pinged by the bot, and leave a
    stale-warning comment on old inactive issues that carry no exempt label.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
if __name__ == "__main__":
    # The entry point in this file is named `lowerCamelCase__`; calling
    # `main()` raised NameError.
    lowerCamelCase__()
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase_ : int | None = None ):
lowerCAmelCase__ : List[str] = value
lowerCAmelCase__ : Node | None = None # Added in order to delete a node easier
lowerCAmelCase__ : Node | None = None
lowerCAmelCase__ : Node | None = None
def __repr__( self : List[str] ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'{self.value}': (self.left, self.right)} ,indent=1 )
class BinarySearchTree:
    """A binary search tree of `Node` objects.

    The demo code below refers to ``BinarySearchTree`` and to methods
    ``insert``/``search``/``remove``/``get_max``/``get_min``/``empty`` by
    name, so those names are restored here.
    """

    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None):
        # Splice `new_children` into the position currently held by `node`.
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        """Return True if `node` is the right child of its parent."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """Insert a single value, walking down from the root to a free leaf."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        """Return the node holding `value`, or None; raise IndexError if empty."""
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """Return the right-most (largest) node of the subtree rooted at `node`."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """Return the left-most (smallest) node of the tree."""
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """Run `traversal_function` over the tree; defaults to preorder."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None):
        """Append the subtree's values to `arr` in sorted (in-order) order."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the k-th smallest value (1-based) of the subtree at `node`."""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    """Return the nodes of the (sub)tree rooted at ``curr_node`` in postorder
    (left subtree, right subtree, then the node itself).

    The body recurses on ``postorder`` by name, so the function keeps it.
    """
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def __SCREAMING_SNAKE_CASE():
    """Demo: build a BST, run search/min/max queries, then delete every value,
    printing the tree after each step.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 106 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
def make_batched(videos):
    """Normalize video input into a batch: a list of videos, each a list of frames.

    The image-processor class below calls ``make_batched`` by name, so the
    function keeps that name.
    """
    # Already a batch of videos (list of lists of frames).
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    # A single video (list of frames) -> wrap into a batch of one.
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    # A single frame -> wrap into a batch with one single-frame video.
    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class _UpperCamelCase ( BaseImageProcessor ):
    r"""
    Video image processor: optionally resizes, center-crops, rescales (with an
    optional offset shifting values around zero) and normalizes each frame of
    a batch of videos.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """Store default preprocessing settings (overridable per `preprocess` call)."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to an exact (height, width) or so the shortest edge matches `size`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop the image to `size` (must contain 'height' and 'width')."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by `scale`; with `offset`, first shift by scale/2."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize the image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transformations to one frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a video or batch of videos and return a `BatchFeature`."""
        # Per-call arguments override the processor defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowerCAmelCase : str = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for parallel odd-even transposition sort.

    Args:
        position: index of this worker in the array.
        value: the element this worker holds.
        l_send / r_send: pipes used to send `value` to the left/right neighbor
            (None when there is no neighbor on that side).
        lr_cv / rr_cv: pipes used to receive the left/right neighbor's value.
        result_pipe: pipe used to report the final (sorted) value back to main.

    The sibling function `odd_even_transposition` spawns processes with this
    target, so the original name is restored.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` in place using a parallel odd-even transposition sort:
    one process per element, exchanging values with its neighbors through pipes.

    Returns the sorted list. `main` below calls this function by name.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())

    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Sort a reversed 10-element list with the parallel sorter and print
    before/after. The `__main__` guard below calls ``main()`` by name.
    """
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 107 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt'}
a_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
a_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _UpperCamelCase ( PreTrainedTokenizerFast ):
    r"""Fast ConvBERT tokenizer backed by the `tokenizers` library
    (WordPiece, BERT-style special tokens).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """[CLS] A [SEP] (+ B [SEP] when a pair is given)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        """0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two tensor protos for equality while ignoring their names.

    The names are blanked for the comparison and then restored, so the inputs
    are left unmodified. The dedup routine below calls this helper by name.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called `name` on `node_proto` with `new_name`,
    recursing into the subgraphs of If/Loop nodes.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Replace in place while keeping the input's position.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Replace input `name` with `new_name` on every node of `graph_proto`."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """For each `(i, ref_i)` pair, drop initializer `i` from
    `model_without_ext` and rewire all uses of its name to initializer
    `ref_i`'s name.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        # The two models must agree on initializer names, and the duplicate
        # must come after its reference.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from the ONNX model at `onnx_file_path`
    and save the result next to it as ``optimized_<name>``.

    Returns:
        The path of the optimized model file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    # Pairwise scan; each duplicate is replaced by the earliest equal tensor.
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Estimate freed bytes from the dtype's element size.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
| 108 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register shared command-line options.

    pytest discovers this hook by its exact name, so the conventional name is
    restored (the placeholder rename left the hook undiscovered).
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit report files at the end of the session when
    ``--make-reports`` was passed. Discovered by its exact name.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: List[str] = logging.get_logger(__name__)
A: Tuple = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    r"""Configuration for a PoolFormer model. All arguments default to the
    `sail/poolformer_s12` architecture.
    """

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class SCREAMING_SNAKE_CASE__ ( OnnxConfig ):
    """ONNX export configuration for PoolFormer.

    `OnnxConfig` reads the `inputs` and `atol_for_validation` properties by
    name, so those names are restored (both were renamed to one colliding
    placeholder, shadowing the first).
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # NCHW pixel input with dynamic axes.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 2E-3
| 109 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the hyper-parameters shared by the image-processor tests.

    Fixes: the class was named ``_UpperCamelCase`` (shadowed by the test class of
    the same name below, and instantiated as ``EfficientFormerImageProcessorTester``,
    a NameError), and its dict method carried a name-mangled placeholder name while
    callers invoke ``prepare_image_processor_dict()``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Default target size when the caller does not override it.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class _UpperCamelCase(__A, unittest.TestCase):
    """Test suite for the EfficientFormer image processor (backed by ViTImageProcessor).

    Fixes: ``setUp`` discarded the tester into a throwaway local instead of
    ``self.image_proc_tester``; the class attribute read as
    ``self.image_processing_class`` was named ``lowerCamelCase__``; every method
    shared one name-mangled name so later definitions clobbered earlier ones
    (including the ``image_processor_dict`` property); and the property tests
    called ``hasattr`` on an undefined name.
    """

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        # Shared hyper-parameters for all tests below.
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        # Batch-feature behaviour is covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import torch
from diffusers import DiffusionPipeline
class _a(UpperCamelCase__):
    """Minimal diffusion pipeline used for pipeline-loading tests.

    Fix: ``__call__`` assigned its final value to a throwaway local but executed
    ``return result`` — a guaranteed NameError; the computed value is now returned.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # Register components so the pipeline machinery can save/load them.
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Draw a single noise sample shaped for the registered UNet.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # Deterministic output: a tensor of ones with the scheduler output's shape.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 110 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset row and record its chars-per-token ratio.

    Fixes: the function was named ``lowerCamelCase__`` although ``ds.map`` below
    calls ``tokenize``; the output dict and the ``example`` parameter were
    assigned/read under mismatched obfuscated names (NameError at runtime).
    Relies on the module-level ``tokenizer`` created after argument parsing.
    """
    output = {}
    # No truncation: we pretokenize the full file content.
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
# Fixes: every binding was obfuscated to ``a_`` while the code read
# ``parser``/``args``/``tokenizer``/``ds`` — all NameErrors at runtime.
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
# Drop every raw-metadata column; only the tokenized fields are kept.
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the set bits of a non-negative integer with Kernighan's trick.

    ``number &= number - 1`` clears the lowest set bit each iteration, so the
    loop runs once per set bit.

    Fix: the function was named ``_UpperCamelCase`` although the benchmark below
    calls it by this name.

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the set bits of a non-negative integer by testing each bit.

    Checks the low bit with ``% 2`` and shifts right, so the loop runs once per
    bit of the number.

    Fix: the function was named ``_UpperCamelCase`` although the benchmark below
    calls it by this name.

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Time both bit-count implementations on a few sample inputs.

    Fixes: the function was named ``_UpperCamelCase`` although the main guard
    calls ``benchmark()``; the printed expressions referenced an undefined
    ``_a``; and the timed statement hard-coded ``25`` instead of using the
    ``number`` being benchmarked.
    """

    def do_benchmark(number: int) -> None:
        # timeit re-imports this module as ``z`` so the functions resolve
        # inside the timed statement.
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")

        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
    # Verify the docstring examples first, then run the timing comparison.
    import doctest

    doctest.testmod()
    benchmark()
| 80 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
# Fix: the logger was bound to ``a_`` while the rest of the script reads ``logger``.
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    """Build a DetrConfig (and panoptic flag) for the given checkpoint name.

    Fixes: the function was named ``lowerCamelCase__`` although it is called as
    ``get_detr_config``; the parameter and every local were assigned/read under
    mismatched obfuscated names (NameErrors for ``model_name``/``idalabel``).

    Returns:
        (config, is_panoptic): the HF config and whether this is a panoptic model.
    """
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    """Build the (original_name, hf_name) weight-rename pairs for a DETR checkpoint.

    Fix: the function was named ``lowerCamelCase__`` with parameter ``_a`` while
    the body reads ``config`` and the caller invokes ``create_rename_keys`` —
    only the signature needed repair; the mapping itself is unchanged.
    """
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
                    )
                )
            # 3 convs
            for i in range(3):
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
                    )
                )
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
                f"encoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
                f"decoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
                f"decoder.layers.{i}.encoder_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
                f"decoder.layers.{i}.encoder_attn.out_proj.bias",
            )
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    Fix: all three parameters were named ``_a`` (a duplicate-argument
    SyntaxError) and the popped value was never written back under the new key.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused attention in_proj weights into separate q/k/v entries, in place.

    PyTorch's MultiHeadAttention stores query/key/value as one stacked matrix;
    the HF model expects three projections, sliced row-wise in q, k, v order
    (hidden size 256 per projection).

    Fixes: the two parameters were both named ``_a`` (SyntaxError) and every
    sliced tensor was assigned to a dead local instead of written back into
    ``state_dict`` under the HF key names.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download the standard COCO cats test image used to verify the conversion.

    Fixes: the function was named ``lowerCamelCase__`` although it is called as
    ``prepare_img``; the URL local and the ``stream=True`` flag were replaced
    with an undefined ``_a``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original torch-hub DETR checkpoint into the HF format.

    Fixes: the three parameters were all named ``_a`` (a duplicate-argument
    SyntaxError) and every local was assigned to an obfuscated name while being
    read under its real name (``state_dict``, ``model_name``, ``prefix``, …).

    Args:
        model_name: one of ``detr-resnet-50`` / ``detr-resnet-101``.
        pytorch_dump_folder_path: optional folder to save the converted model to.
        push_to_hub: whether to upload model + processor to the Hub afterwards.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()

    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    img_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=img_format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to ``a_`` while the code
    # read ``parser``/``args`` — both NameErrors.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DistilBERT subpackage.
# Fixes: all four assignments clobbered a single ``__snake_case`` name while the
# final ``_LazyModule`` call read ``_import_structure`` (NameError), the optional
# backends overwrote the dict instead of adding keys to it, and the lazy module
# object was discarded instead of being installed into ``sys.modules``.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 259 |
import os
def solution():
    """Project Euler 22: sum of (alphabetical rank * name score) over all names.

    A name score is the sum of its letters' alphabet positions (``ord - 64``
    for uppercase ASCII).

    Fixes: the function was named ``lowerCamelCase__`` although the main guard
    calls ``solution()``; ``os.path.dirname`` received an undefined ``_a``
    instead of ``__file__``; and the locals ``names``/``name_score``/
    ``total_score`` were assigned to obfuscated names while being read by their
    real names.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        # reset for the next name
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
def greatest_common_divisor(a, b):
    """Recursive Euclidean GCD; always returns a non-negative value.

    Fixes: both parameters were named ``lowerCamelCase__`` (a duplicate-argument
    SyntaxError), the body referenced an undefined ``_a``, and the function name
    did not match the ``greatest_common_divisor`` call in ``main`` below.
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x, y):
    """Iterative Euclidean GCD; always returns a non-negative value.

    Fixes: both parameters were named ``lowerCamelCase__`` (a duplicate-argument
    SyntaxError) and the simultaneous swap ``x, y = y, x % y`` was collapsed
    into an assignment to a single dead local.
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def main():
    """Read two comma-separated integers from stdin and print both GCD results.

    Fixes: the function was named ``lowerCamelCase_`` although the main guard
    calls ``main()``; the parsed pieces were assigned to obfuscated names while
    the code read ``nums``/``num_1``/``num_2``.
    """
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
| 19 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func, y0, x0, step_size, x_end):
    """Solve y' = ode_func(x, y) on [x0, x_end] with the explicit Euler method.

    Fixes: all five parameters were named ``_a`` (a duplicate-argument
    SyntaxError); names are restored from the body's reads (``x_end``,
    ``step_size``, ``ode_func``, initial condition, start point).

    Args:
        ode_func: callable f(x, y) giving the derivative.
        y0: initial value y(x0).
        x0: start of the integration interval.
        step_size: fixed step size h.
        x_end: end of the integration interval.

    Returns:
        numpy array of the n+1 approximated y values, y[0] == y0.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Euler update: y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was bound to ``lowerCAmelCase__`` while the next line read
# ``git_repo_path`` (NameError at import time).
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register shared command-line options.

    Fix: the hook was named ``UpperCamelCase__`` — pytest discovers hooks by
    their exact name, so it would never be called; the parameter is the pytest
    parser object forwarded to the shared helper.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: optionally emit report files at the end of the run.

    Fixes: the hook was named ``UpperCamelCase__`` (never discovered by
    pytest), and the helper was called with an undefined ``_a`` instead of the
    reporter and the ``--make-reports`` option value.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 143 |
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR over two 0/1 inputs: 1 if at least one input is 1, else 0.

    Fixes: both parameters were named ``_a`` (a duplicate-argument SyntaxError)
    and the function name did not match the ``or_gate`` calls below (it also
    collided with the test function's obfuscated name, which clobbered it).
    """
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    """Exhaustively check the two-input OR truth table.

    Fix: this function shared the obfuscated name ``lowerCamelCase__`` with the
    gate itself, clobbering it; it is restored to a pytest-discoverable name.
    """
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
    # Demonstrate the gate on all four input combinations.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2.

    Fixes: the function was named ``A`` although the demo block calls
    ``make_matrix``; the body referenced an undefined ``_a``. A non-positive or
    zero size falls back to 4 (``abs(...) or 4``).
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise.

    Fix: all helpers in this file were named ``A`` (each definition clobbering
    the previous); restored to the name the demo block calls.
    """
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees.

    Fix: all helpers in this file were named ``A`` (each definition clobbering
    the previous); restored to a distinct, descriptive name.
    """
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise.

    Fix: all helpers in this file were named ``A`` (each definition clobbering
    the previous); restored to a distinct, descriptive name.
    """
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose (rows become columns).

    Fix: the function was named ``A`` although the rotation helpers call
    ``transpose``; the comprehension input was an undefined obfuscated name.
    """
    matrix = [list(x) for x in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with its rows in reverse order.

    Fix: the function was named ``A`` although the rotation helpers call
    ``reverse_row``.
    """
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each row's elements in reverse order.

    Fix: the function was named ``A`` although the rotation helpers call
    ``reverse_column``.
    """
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    """Print the matrix one space-separated row per line.

    Fixes: the function was named ``A`` although the demo block calls
    ``print_matrix``; the row was printed via an undefined ``_a``.
    """
    for row in matrix:
        print(*row)
if __name__ == "__main__":
    # Fixes: the demo called ``rotate_aa`` / ``rotate_aaa`` (the same name for
    # both 180 and 270!) while every function above was named ``A``; it now
    # calls the restored helper names.
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 71 |
# Universal gas constant R in J/(mol*K).
# Fix: this was bound to ``a_`` while the formula below reads ``UNIVERSAL_GAS_CONSTANT``.
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed of a gas molecule: sqrt(3*R*T / M).

    Fixes: both parameters were named ``_a`` (a duplicate-argument SyntaxError).

    Args:
        temperature: absolute temperature in kelvin (must be >= 0).
        molar_mass: molar mass in kg/mol (must be > 0).

    Raises:
        Exception: if the temperature is negative or the molar mass is not positive.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    # Fix: every binding was obfuscated to ``a_`` while the call and the
    # f-string read ``temperature``/``molar_mass``/``vrms`` (NameErrors).
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
    """Test fixture that builds a small XLMConfig plus random inputs/labels and
    shape-checks every XLM task head (LM, QA, classification, multiple choice).

    NOTE(review): an automated rewrite normalized all locals to `__a` and all
    parameters to `_a`; the repeated `_a` parameters make the method
    signatures invalid as written — the intended distinct names must be
    restored before this class can run.
    """

    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=False , _a=False , _a=False , _a=2 , _a=99 , _a=0 , _a=32 , _a=5 , _a=4 , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=2 , _a=4 , _a="last" , _a=True , _a=None , _a=0 , ):
        # All assignments below were collapsed onto `__a`; the RHS names show
        # the attributes the rest of the class expects (self.batch_size, ...).
        __a = parent
        __a = batch_size
        __a = seq_length
        __a = is_training
        __a = use_input_lengths
        __a = use_token_type_ids
        __a = use_labels
        __a = gelu_activation
        __a = sinusoidal_embeddings
        __a = causal
        __a = asm
        __a = n_langs
        __a = vocab_size
        __a = n_special
        __a = hidden_size
        __a = num_hidden_layers
        __a = num_attention_heads
        __a = hidden_dropout_prob
        __a = attention_probs_dropout_prob
        __a = max_position_embeddings
        __a = type_sequence_label_size
        __a = initializer_range
        __a = num_labels
        __a = num_choices
        __a = summary_type
        __a = use_proj
        __a = scope
        __a = bos_token_id

    # Builds random ids/masks/labels plus a config for one forward pass.
    def __UpperCAmelCase ( self ):
        __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __a = random_attention_mask([self.batch_size, self.seq_length] )
        __a = None
        if self.use_input_lengths:
            __a = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        __a = None
        if self.use_token_type_ids:
            __a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        __a = None
        __a = None
        __a = None
        if self.use_labels:
            __a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __a = ids_tensor([self.batch_size] , 2 ).float()
            __a = ids_tensor([self.batch_size] , self.num_choices )
        __a = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    # Tiny config so the tests stay fast.
    def __UpperCAmelCase ( self ):
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )

    # Base model: checks last_hidden_state shape.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
        __a = XLMModel(config=_a )
        model.to(_a )
        model.eval()
        __a = model(_a , lengths=_a , langs=_a )
        __a = model(_a , langs=_a )
        __a = model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # LM head: checks loss scalar and per-token vocab logits.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
        __a = XLMWithLMHeadModel(_a )
        model.to(_a )
        model.eval()
        __a = model(_a , token_type_ids=_a , labels=_a )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Simple QA head: start/end logits per token.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
        __a = XLMForQuestionAnsweringSimple(_a )
        model.to(_a )
        model.eval()
        __a = model(_a )
        __a = model(_a , start_positions=_a , end_positions=_a )
        __a = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Beam-search QA head: top-k start/end probabilities plus cls logits.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
        __a = XLMForQuestionAnswering(_a )
        model.to(_a )
        model.eval()
        __a = model(_a )
        __a = model(
            _a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , p_mask=_a , )
        __a = model(
            _a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , )
        (__a) = result_with_labels.to_tuple()
        __a = model(_a , start_positions=_a , end_positions=_a )
        (__a) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    # Sequence classification head.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
        __a = XLMForSequenceClassification(_a )
        model.to(_a )
        model.eval()
        __a = model(_a )
        __a = model(_a , labels=_a )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # Token classification head.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
        __a = self.num_labels
        __a = XLMForTokenClassification(_a )
        model.to(_a )
        model.eval()
        __a = model(_a , attention_mask=_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Multiple-choice head: inputs are expanded along a num_choices axis.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
        __a = self.num_choices
        __a = XLMForMultipleChoice(config=_a )
        model.to(_a )
        model.eval()
        __a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __a = model(
            _a , attention_mask=_a , token_type_ids=_a , labels=_a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Repackages prepare_config_and_inputs() into the common test dict shape.
    def __UpperCAmelCase ( self ):
        __a = self.prepare_config_and_inputs()
        (
            __a
        ) = config_and_inputs
        __a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __A , __A , __A , unittest.TestCase ):
    """Common model/pipeline test suite for the XLM family.

    NOTE(review): the `__A` base classes are unresolved placeholders from an
    automated rewrite (presumably ModelTesterMixin, GenerationTesterMixin and
    PipelineTesterMixin, which are imported above), and the three class
    attributes below were all renamed to `__UpperCAmelCase` — confirm the
    intended names before relying on this class.
    """

    # All task-model classes under test (tuple; empty when torch is absent).
    __UpperCAmelCase : tuple = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    __UpperCAmelCase : tuple = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    __UpperCAmelCase : dict = (
        {
            'feature-extraction': XLMModel,
            'fill-mask': XLMWithLMHeadModel,
            'question-answering': XLMForQuestionAnsweringSimple,
            'text-classification': XLMForSequenceClassification,
            'text-generation': XLMWithLMHeadModel,
            'token-classification': XLMForTokenClassification,
            'zero-shot': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Skips QA pipeline tests when a slow (non-"Fast") tokenizer is used.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    # Adds the extra zero-label tensors XLMForQuestionAnswering expects.
    def __UpperCAmelCase ( self , _a , _a , _a=False ):
        __a = super()._prepare_for_class(_a , _a , return_labels=_a )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                __a = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_a )
                __a = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_a )
        return inputs_dict

    def __UpperCAmelCase ( self ):
        __a = XLMModelTester(self )
        __a = ConfigTester(self , config_class=_a , emb_dim=37 )

    def __UpperCAmelCase ( self ):
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*_a )

    def __UpperCAmelCase ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*_a )

    def __UpperCAmelCase ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*_a )

    def __UpperCAmelCase ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*_a )

    def __UpperCAmelCase ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*_a )

    def __UpperCAmelCase ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*_a )

    def __UpperCAmelCase ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*_a )

    # Verifies per-step attention shapes during generation (PAD token added).
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=False , _a=1 ):
        self.assertIsInstance(_a , _a )
        self.assertListEqual(
            [isinstance(_a , _a ) for iter_attentions in attentions] , [True] * len(_a ) )
        self.assertEqual(len(_a ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(_a ):
            # adds PAD dummy token
            __a = min_length + idx + 1
            __a = min_length + idx + 1
            __a = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_a ) )

    # Verifies per-step hidden-state shapes during generation.
    def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=False , _a=1 ):
        self.assertIsInstance(_a , _a )
        self.assertListEqual(
            [isinstance(_a , _a ) for iter_hidden_states in hidden_states] , [True] * len(_a ) , )
        self.assertEqual(len(_a ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(_a ):
            # adds PAD dummy token
            __a = min_length + idx + 1
            __a = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_a ) , )
        pass

    @slow
    def __UpperCAmelCase ( self ):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a = XLMModel.from_pretrained(_a )
            self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: greedy generation with the pretrained
    xlm-mlm-en-2048 checkpoint must reproduce a fixed token sequence.
    """

    @slow
    def __UpperCAmelCase ( self ):
        __a = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
        model.to(_a )
        __a = torch.tensor([[14, 447]] , dtype=torch.long , device=_a )  # the president
        # Expected greedy output: the prompt tokens repeated ten times.
        __a = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        __a = model.generate(_a , do_sample=_a )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _a )
| 45 |
# Adjacency-list demo graph used by the __main__ example below.
# NOTE(review): the constant was renamed to `a_` by an automated rewrite; the
# example refers to it as `demo_graph` — confirm the intended name.
a_ = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def lowerCamelCase__ ( graph , start , goal ):
    """Breadth-first search for one shortest path from `start` to `goal`.

    Returns the node list of a shortest path, [start] when start == goal,
    and [] when no path exists.
    """
    # Bug fix: the original declared three parameters all named `_a` (a
    # SyntaxError) while the body used `graph`/`start`/`goal`, and appended /
    # marked the undefined placeholder `_a` instead of `neighbour`/`node`.
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def lowerCamelCase__ ( graph , start , target ):
    """Edge count of the shortest path from `start` to `target`, or -1.

    Returns 0 when start == target, and -1 when either node is missing from
    the graph or the target is unreachable.
    """
    # Bug fixes: the original declared three parameters all named `_a` (a
    # SyntaxError), and both `dist[...]` updates had been collapsed into
    # assignments to a throwaway local, so every computed distance was
    # silently dropped and the function always returned -1.
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # NOTE(review): both helpers above were renamed to `lowerCamelCase__` and
    # the graph constant to `a_`, so `bfs_shortest_path`,
    # `bfs_shortest_path_distance` and `demo_graph` are undefined here —
    # confirm the intended names before running this module.
    print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): `UpperCamelCase` is assigned three times below — an automated
# rename collapsed what were presumably the module logger, the download URL
# prefix and the checkpoint-shard mapping; the functions below refer to them
# as `logger`, `PREFIX` and `MODEL_MAPPING` — confirm the intended names.
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = '''https://openaipublic.azureedge.net/jukebox/models/'''
# Checkpoint shards to download per released Jukebox model.
UpperCamelCase = {
    '''jukebox-1b-lyrics''': [
        '''5b/vqvae.pth.tar''',
        '''5b/prior_level_0.pth.tar''',
        '''5b/prior_level_1.pth.tar''',
        '''1b_lyrics/prior_level_2.pth.tar''',
    ],
    '''jukebox-5b-lyrics''': [
        '''5b/vqvae.pth.tar''',
        '''5b/prior_level_0.pth.tar''',
        '''5b/prior_level_1.pth.tar''',
        '''5b_lyrics/prior_level_2.pth.tar''',
    ],
}
def __lowerCamelCase ( key ) -> str:
    """Map one OpenAI Jukebox checkpoint parameter name onto the transformers name.

    Several rules rewrite `key` in place and fall through so later rules can
    refine the result; `return` rules end the chain immediately.
    """
    # Bug fixes: the parameter was named `snake_case__` while the body used
    # `key`; every non-returning `key.replace(...)` result was assigned to a
    # throwaway local (so those renames were silently dropped); and the
    # `-> List[Any]` annotation referenced an unimported name.
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {}
import re
_SCREAMING_SNAKE_CASE = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
_SCREAMING_SNAKE_CASE = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_SCREAMING_SNAKE_CASE = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
_SCREAMING_SNAKE_CASE = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
_SCREAMING_SNAKE_CASE = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_SCREAMING_SNAKE_CASE = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
_SCREAMING_SNAKE_CASE = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
_SCREAMING_SNAKE_CASE = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
_SCREAMING_SNAKE_CASE = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] )
_SCREAMING_SNAKE_CASE = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
_SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.sub(_a ,_a )
elif re_encoder_block_resnet.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_encoder_block_resnet.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] )
_SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]]
_SCREAMING_SNAKE_CASE = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
_SCREAMING_SNAKE_CASE = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
_SCREAMING_SNAKE_CASE = prefix + resnet_block
_SCREAMING_SNAKE_CASE = re_encoder_block_resnet.sub(_a ,_a )
elif re_encoder_block_proj_out.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
_SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.sub(_a ,_a )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2
_SCREAMING_SNAKE_CASE = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
_SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.sub(_a ,_a )
elif re_decoder_block_resnet.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_decoder_block_resnet.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2
_SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]]
_SCREAMING_SNAKE_CASE = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
_SCREAMING_SNAKE_CASE = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
_SCREAMING_SNAKE_CASE = prefix + resnet_block
_SCREAMING_SNAKE_CASE = re_decoder_block_resnet.sub(_a ,_a )
elif re_decoder_block_proj_in.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
_SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.sub(_a ,_a )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2
_SCREAMING_SNAKE_CASE = F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
_SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.sub(_a ,_a )
elif re_prior_cond_resnet.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_prior_cond_resnet.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2
_SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]]
_SCREAMING_SNAKE_CASE = F'conditioner_blocks.upsampler.upsample_block.{block_index}.'
_SCREAMING_SNAKE_CASE = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
_SCREAMING_SNAKE_CASE = prefix + resnet_block
_SCREAMING_SNAKE_CASE = re_prior_cond_resnet.sub(_a ,_a )
elif re_prior_cond_proj_in.fullmatch(_a ):
_SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.match(_a )
_SCREAMING_SNAKE_CASE = regex_match.groups()
_SCREAMING_SNAKE_CASE = F'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
_SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.sub(_a ,_a )
# keep original key
else:
_SCREAMING_SNAKE_CASE = original_key
_SCREAMING_SNAKE_CASE = replace_key(_a )
if F'{key_prefix}.{key}' not in model_state_dict or key is None:
print(F'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape:
_SCREAMING_SNAKE_CASE = model_state_dict[F'{key_prefix}.{key}']
print(F'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
_SCREAMING_SNAKE_CASE = original_key
_SCREAMING_SNAKE_CASE = original_key
_SCREAMING_SNAKE_CASE = value
return new_dict
@torch.no_grad()
def __lowerCamelCase ( snake_case__=None ,snake_case__=None ) -> list:
    """Download the shards of a Jukebox checkpoint, rename their weights and
    load them into a transformers JukeboxModel, saving the converted dump and
    a JSON name-mapping alongside it. Returns the per-prior weight dicts.

    NOTE(review): both parameters are named `snake_case__` (a SyntaxError;
    presumably `model_name` and `pytorch_dump_folder_path`, which the body
    uses), and most locals were collapsed onto `_SCREAMING_SNAKE_CASE` —
    including the per-branch `new_dic[...]` key rewrites inside the `old_dic`
    loop, whose left-hand sides were lost. Restore from the upstream script
    before running.
    """
    # Fetch any missing checkpoint shards into the dump folder.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
            _SCREAMING_SNAKE_CASE = requests.get(F'{PREFIX}{file}' ,allow_redirects=_a )
            os.makedirs(F'{pytorch_dump_folder_path}/' ,exist_ok=_a )
            open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ,"""wb""" ).write(r.content )
    _SCREAMING_SNAKE_CASE = MODEL_MAPPING[model_name.split("""/""" )[-1]]
    _SCREAMING_SNAKE_CASE = JukeboxConfig.from_pretrained(_a )
    _SCREAMING_SNAKE_CASE = JukeboxModel(_a )
    _SCREAMING_SNAKE_CASE = []
    _SCREAMING_SNAKE_CASE = {}
    for i, dict_name in enumerate(_a ):
        _SCREAMING_SNAKE_CASE = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["model"]
        _SCREAMING_SNAKE_CASE = {}
        for k in old_dic.keys():
            if k.endswith(""".b""" ):
                _SCREAMING_SNAKE_CASE = old_dic[k]
            elif k.endswith(""".w""" ):
                _SCREAMING_SNAKE_CASE = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                _SCREAMING_SNAKE_CASE = old_dic[k]
            else:
                _SCREAMING_SNAKE_CASE = old_dic[k]
        # Shard 0 is the VQ-VAE; the rest are priors in reverse order.
        _SCREAMING_SNAKE_CASE = "vqvae" if i == 0 else F'priors.{3 - i}'
        _SCREAMING_SNAKE_CASE = fix_jukebox_keys(_a ,model.state_dict() ,_a ,_a )
        weight_dict.append(_a )
    _SCREAMING_SNAKE_CASE = weight_dict.pop(0 )
    model.vqvae.load_state_dict(_a )
    for i in range(len(_a ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(_a ).mkdir(exist_ok=_a )
    with open(F'{pytorch_dump_folder_path}/mapping.json' ,"""w""" ) as txtfile:
        json.dump(_a ,_a )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(_a )
    return weight_dict
if __name__ == "__main__":
    # CLI entry point: parse the model name and output folder, then run the
    # download-and-convert routine defined above.
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''jukebox-5b-lyrics''',
        type=str,
        help='''Name of the model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''jukebox-5b-lyrics-converted''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    UpperCamelCase = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 306 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
    """Slow TF integration test for google/mt5-small.

    Checks that the per-token negative log-likelihood of "Hi I am" given
    "Hello there" matches a recorded reference value.
    """

    @slow
    def __UpperCamelCase ( self : str ) -> None:
        """Score a fixed input/label pair and compare against the expected loss."""
        SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
        SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("google/mt5-small" )
        SCREAMING_SNAKE_CASE : Tuple = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        SCREAMING_SNAKE_CASE : str = model(a , labels=a ).loss
        SCREAMING_SNAKE_CASE : Any = -tf.math.reduce_mean(a ).numpy()
        SCREAMING_SNAKE_CASE : Union[str, Any] = -21.22_8168
        # 2e-4 tolerance absorbs backend/hardware numeric differences.
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
def UpperCAmelCase_ ( __lowerCAmelCase = 4_000_000 ) -> int:
    """Project Euler 2: sum of the even Fibonacci numbers not exceeding the limit."""
    # Bug fixes: the body referred to `n` and `_a`, neither of which existed
    # (the parameter is `__lowerCAmelCase`), and the `Optional[Any]` annotation
    # referenced unimported names.
    limit = __lowerCAmelCase
    fib = [0, 1]
    i = 0
    while fib[i] <= limit:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > limit:
            break
        i += 1
    # The last appended value exceeds the limit, so it is excluded from the sum.
    total = 0
    for value in fib[:-1]:
        if value % 2 == 0:
            total += value
    return total
if __name__ == "__main__":
    # NOTE(review): the function above was renamed to `UpperCAmelCase_`, so
    # `solution` is undefined here — confirm the intended name.
    print(F'{solution() = }')
| 156 |
from math import factorial


def lowerCamelCase__ ( successes , trials , prob ):
    """Binomial probability mass: P(X = successes) for `trials` Bernoulli trials.

    successes -- number of successful trials (0 <= successes <= trials)
    trials    -- total number of trials (non-negative integer)
    prob      -- per-trial success probability, strictly between 0 and 1
    Raises ValueError on invalid input.
    """
    # Bug fix: the original signature declared three parameters all named `_a`
    # (a SyntaxError); the names are restored from how the body used them.
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): the function above was renamed to `lowerCamelCase__`, so
    # `binomial_distribution` is undefined here — confirm the intended name.
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): `lowercase_` is assigned three times below — an automated
# rename collapsed what were presumably three distinct constants (the sample
# SentencePiece fixture path plus two fairseq language-code token ids used by
# the tokenizer tests); confirm the intended names.
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowercase_ = 250004
lowercase_ = 250020
@require_sentencepiece
@require_tokenizers
class A ( __A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = MBartaaTokenizer
lowerCamelCase = MBartaaTokenizerFast
lowerCamelCase = True
lowerCamelCase = True
def snake_case__ ( self : Any )-> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ = MBartaaTokenizer(lowercase_,src_lang='en_XX',tgt_lang='ro_RO',keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
A__ = "<s>"
A__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ )
def snake_case__ ( self : List[str] )-> Dict:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],'<s>' )
self.assertEqual(vocab_keys[1],'<pad>' )
self.assertEqual(vocab_keys[-1],'<mask>' )
self.assertEqual(len(lowercase_ ),1_0_5_4 )
def snake_case__ ( self : Any )-> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size,1_0_5_4 )
def snake_case__ ( self : List[str] )-> List[Any]:
'''simple docstring'''
A__ = MBartaaTokenizer(lowercase_,src_lang='en_XX',tgt_lang='ro_RO',keep_accents=lowercase_ )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ),[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]],)
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_,[SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'],)
A__ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
],)
A__ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_,[SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'],)
@slow
def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = {"input_ids": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_,model_name='facebook/mbart-large-50',revision='d3913889c59cd5c9e456b269c376325eabad57e2',)
def snake_case__(self):
    """Round-trip test: saving a fast tokenizer and a slow tokenizer to the same
    directory must produce compatible files, and both must reload with all
    special tokens intact (with and without ``legacy_format``).

    NOTE(review): the original body referenced an undefined placeholder
    (``lowercase_``) everywhere; this restores the intended variable usage.
    ``tempfile``/``shutil`` are expected from this module's imports.
    """
    if not self.test_slow_tokenizer:
        # as we don't have a slow version, we can't compare the outputs between slow and fast versions
        return

    self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
            # Checks it save with the same files + the tokenizer.json file for the fast one
            self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
            tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
            self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))
            shutil.rmtree(tmpdirname2)

            # Save tokenizer rust, legacy_format=True
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
            # Checks it save with the same files
            self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))
            shutil.rmtree(tmpdirname2)

            # Save tokenizer rust, legacy_format=False
            tmpdirname2 = tempfile.mkdtemp()
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
            # Checks it saved the tokenizer.json file
            self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))
            shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class A(unittest.TestCase):
    """Integration tests for the ``facebook/mbart-large-50-one-to-many-mmt`` tokenizer.

    NOTE(review): the original class bound every fixture to a single placeholder
    name and referenced the undefined ``lowercase_``; this restores the names the
    methods actually read (``checkpoint_name``, ``src_text``, ...).  ``EN_CODE``,
    ``RO_CODE``, ``MBartaaTokenizer``, ``shift_tokens_right``, ``nested_simplify``
    and ``BatchEncoding`` are expected from this module's imports — confirm.
    """

    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        # Single shared (network-downloaded) tokenizer for every test method.
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        # mBART-50 places the language code first and EOS (2) last.
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seqaseq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCamelCase(__A):
    """Fast-tokenizer subclass wired to the project's custom slow tokenizer.

    NOTE(review): the base class ``__A`` is presumably the fast pretrained
    tokenizer — confirm against this module's imports.  The original body had
    dataset-join residue (``| 76 | 0 |``) fused onto the ``pass`` line, which
    made it a syntax error; that residue is removed here.
    """

    # Attribute equivalent of ``slow_tokenizer_class``: points the fast
    # tokenizer at the custom slow implementation imported above.
    lowerCamelCase__ = CustomTokenizer
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_(__A):
    """Unconditional image-generation pipeline for the score-based SDE-VE
    (variance-exploding) scheme: annealed Langevin correction steps followed by
    a predictor step per scheduler timestep.

    NOTE(review): the original assigned every intermediate to one placeholder
    name and reused ``_A`` for all parameters (a syntax error); this restores
    the intended variables.
    """

    # Components registered by __init__ (annotation-only declarations).
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=2000,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        """Sample ``batch_size`` images; returns ImagePipelineOutput (or a tuple
        when ``return_dict`` is False)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from pure noise scaled by the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step as the output image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a_ = logging.getLogger(__name__)
class _UpperCamelCase:
    """Ray actor wrapper that lazily builds and queries a RagRetriever.

    NOTE(review): the original bound state to locals instead of ``self`` and
    gave all three methods the same mangled name; the sibling distributed
    retriever in this module calls ``create_rag_retriever`` / ``init_retrieval``
    / ``retrieve`` remotely, so those names are restored here.
    """

    def __init__(self):
        # Retriever is built on first use via create_rag_retriever().
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the per-actor RagRetriever once (idempotent)."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index into this actor's memory."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run retrieval; returns (doc_ids, retrieved_doc_embeds)."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class _UpperCamelCase(__A):
    """Distributed RAG retriever that fans retrieval out to Ray actor workers.

    NOTE(review): the original reused ``a`` for every parameter (a syntax
    error), assigned state to locals, and gave all methods one mangled name;
    this restores the names its own ``__init__`` already calls remotely and the
    RagRetriever API (``init_retrieval``/``retrieve``).  The module logger is
    bound as ``a_`` at the top of this section.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index
                    )
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        """Initialize the index on every worker, or locally when none exist."""
        a_.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve via a random worker (load balancing) or locally."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(
                random_worker.retrieve.remote(question_hidden_states, n_docs)
            )
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(_UpperCamelCase, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build the retriever plus its tokenizers/index from a checkpoint."""
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
import base64
import baseaa
def UpperCamelCase(_A):
    """Encode the string *_A* as Ascii85 and return the encoded bytes.

    Fix: the original body referenced an undefined name (``string``) instead of
    the parameter, and a nonexistent module (``baseaa``); this uses the stdlib
    ``base64.a85encode`` on the UTF-8 encoding of the argument.
    """
    return base64.a85encode(_A.encode("utf-8"))
def UpperCamelCase(_A):
    """Decode the Ascii85 payload *_A* and return it as a UTF-8 string.

    Fix: the original body referenced an undefined name (``_a``) instead of the
    parameter and a nonexistent module (``baseaa``); this uses the stdlib
    ``base64.a85decode``.
    """
    return base64.a85decode(_A).decode("utf-8")
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    from doctest import testmod

    testmod()
from typing import Any
class _UpperCamelCase:
    """A single node of a singly linked list: a payload plus a ``next`` link.

    Fix: the original ``__init__`` assigned to locals, so ``self.data`` and
    ``self.next`` were never created and ``__repr__`` raised AttributeError.
    """

    def __init__(self, a: Any) -> None:
        self.data = a
        self.next = None  # detached until linked into a list

    def __repr__(self) -> str:
        """Debug-friendly form, e.g. ``Node(5)``."""
        return f"Node({self.data})"


# The linked-list class and test helpers in this module refer to the node
# class as ``Node`` (e.g. ``Node(data)`` in insert_nth), so expose that name.
Node = _UpperCamelCase
class _UpperCamelCase:
    """Singly linked list with indexing, head/tail/nth insertion and deletion,
    and in-place reversal.

    Fix: the original assigned every variable (including ``self.head``) to a
    throwaway placeholder and gave all methods one mangled name; the helper
    functions below call ``insert_nth``/``delete_head``/... so those names are
    restored here.
    """

    def __init__(self):
        self.head = None  # empty list

    def __iter__(self):
        """Yield each node's *data*, head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        """Number of elements (O(n) walk)."""
        return sum(1 for _ in self)

    def __repr__(self):
        """Render as ``a->b->c``; an empty list renders as ``''``."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        """Return the data stored at 0-based *index*."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        """Replace the data stored at 0-based *index*."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        """Append *data* at the end."""
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        """Prepend *data* at the front."""
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        """Insert *data* so it becomes the *index*-th element."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)  # node class defined above in this module
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        """Remove and return the *index*-th element."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        """Reverse the list in place by re-pointing every ``next`` link."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # prev now points at the old tail, which becomes the new head.
        self.head = prev


# The helper/test functions below construct the list via ``LinkedList()``.
LinkedList = _UpperCamelCase
def lowerCamelCase__():
    """Smoke-test the basic LinkedList API with integer payloads.

    Fix: the original referenced an undefined placeholder ``_a`` for the list
    and for the loop variable inside the join expressions.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def lowerCamelCase__():
    """Exercise LinkedList with heterogeneous payloads (ints, strings, floats,
    Nodes, None) and check the string form after each mutation.

    Fix: the original referenced an undefined placeholder ``_a`` everywhere the
    list (or loop item / deletion result) was meant.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.2,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def lowerCamelCase__():
    """Interactive demo: run the doctests, then drive the LinkedList from stdin.

    Fix: the original referenced the undefined placeholder ``_a`` for the list
    and dropped the assignment target of the ``linked_list[1] = ...`` update;
    the ``__main__`` guard also called an undefined ``main``.
    """
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    # The interactive entry point is the function defined just above.
    lowerCamelCase__()
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a_ = get_logger(__name__)
class _UpperCamelCase(enum.Enum):
    """How much verification to run on downloaded/processed data.

    Fix: the original gave all three members the same name, which the Enum
    metaclass rejects (``TypeError: Attempted to reuse key``), so the class
    could not even be created.
    """

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


# Readable alias matching the values' naming (``verification_mode`` in error
# messages below refers to these options).
VerificationMode = _UpperCamelCase
class ChecksumVerificationException(Exception):
    """Base class for errors raised while verifying download checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were not found among the recorded downloads."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A recorded download was not present in the expected set."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum differs from the expected one."""
def lowerCamelCase__(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    Raises ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile /
    NonMatchingChecksumError on mismatch; logs success otherwise.  Fix: the
    original reused ``_a`` for every parameter (a syntax error) and referenced
    an undefined ``logger`` — the module logger is bound as ``a_`` above.
    """
    if expected_checksums is None:
        a_.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    a_.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base class for errors raised while verifying dataset splits sizes."""


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not present in the expected set."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not found among the recorded ones."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A split's recorded example count differs from the expected one."""
def lowerCamelCase__(expected_splits, recorded_splits):
    """Verify that recorded splits match the expected splits (names and
    ``num_examples``), raising the dedicated exceptions on mismatch.

    Fix: the original reused ``_a`` for both parameters (a syntax error) and
    referenced an undefined ``logger`` — the module logger is bound as ``a_``.
    """
    if expected_splits is None:
        a_.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    a_.info("All the splits matched successfully.")
def lowerCamelCase__(path, record_checksum=True):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for the file at *path*.

    The checksum is a streaming hex digest over 1 MiB chunks (``shaaaa`` is the
    hash constructor imported from hashlib at the top of this section), or
    ``None`` when ``record_checksum`` is False.  Fix: the original reused
    ``_a`` for both parameters and hashed an undefined name instead of each
    chunk.
    """
    if record_checksum:
        m = shaaaa()
        with open(path, "rb") as f:
            # Stream in 1 MiB chunks so large files don't load into memory.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def lowerCamelCase__(dataset_size):
    """Return True iff *dataset_size* is a truthy size strictly below the
    configured in-memory maximum (``config.IN_MEMORY_MAX_SIZE``).

    Fix: the original body read ``dataset_size`` while the parameter was named
    ``_a`` (NameError), and the return line had dataset-join residue
    (``| 76 | 0 |``) fused onto it, making it a syntax error.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCAmelCase_:
    """Model tester for the TF DeBERTa-v2 family: builds small configs/inputs
    and runs shape checks for each head.

    Fix: the original reused one name for every parameter (a syntax error) and
    assigned all state/locals to a single placeholder; attribute targets are
    recovered from the right-hand sides, and method names are restored to the
    ones the companion test class calls (``prepare_config_and_inputs``,
    ``create_and_check_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random inputs plus a small DebertaVaConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shared model-tester suite for the TF DeBERTa-v2 family.

    NOTE(review): the original listed one scrambled base twice (``__A, __A``
    -- a duplicate-base TypeError), reused one identifier for all three class
    attributes and all eight methods (so only the last binding survived), and
    read locals under a different name than they were assigned.  Standard
    transformers tester names are restored; confirm the mixin names and the
    config class against this file's imports.
    """

    # Model classes exercised by the common tests (empty when TF is absent).
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task -> model class mapping used by the pipeline tests.
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the two False flags were bound to one scrambled name;
    # these are the conventional tester switches -- confirm.
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # Store the helpers on self so the test methods below can reach them.
        self.model_tester = TFDebertaVaModelTester(self)
        # NOTE(review): the config class argument was scrambled; DebertaVaConfig
        # matches the tester name -- confirm against this file's imports.
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released DeBERTa-v2-xlarge checkpoint.

    NOTE(review): renamed -- the original reused the common-suite's class name
    (shadowing it) and gave both methods one name, so the skipped stub hid the
    real test.  Locals are restored from the names the body reads.
    """

    @unittest.skip(reason='''Model not available yet''')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
        input_ids = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        # First output is the last hidden state; compare a small slice to the
        # reference values.
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 259 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into a HF PyTorch dump.

    Writes three files into ``pytorch_dump_folder_path``: the re-rooted model
    weights, the JSON config, and the JSON vocabulary.

    NOTE(review): function name restored from the ``__main__`` call site; the
    scrambled original bound every local to one name while the body read
    ``state_dict``/``config``/``vocab``/the dump paths.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            # NOTE(review): non-head weights re-rooted under "transformer."
            # per the comment above -- confirm against the XLM conversion.
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Keep only JSON-serializable params: tensor/array values are dropped.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # Strip BPE continuation markers ("@@"); whole words past the first 13
    # special tokens receive the "</w>" end-of-word suffix.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    # Fixed: the original printed the config path here instead of the vocab path.
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    # CLI entry point: both paths are required.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _SCREAMING_SNAKE_CASE ( __A ):
    """Processor wrapping a Flava image processor and a BERT tokenizer.

    NOTE(review): machine-scrambled chunk.  The three class attributes below
    all rebind one name (only the last survives -- they look like
    ``attributes`` / ``image_processor_class`` / ``tokenizer_class``), and
    ``__init__``/``__call__`` repeat the parameter name ``lowercase`` (a
    SyntaxError) while their bodies read the real names (``kwargs``,
    ``text``, ``images``, ...).  Code left byte-identical pending a
    confirmed upstream source.
    """

    lowerCAmelCase__ = ['image_processor', 'tokenizer']
    lowerCAmelCase__ = 'FlavaImageProcessor'
    lowerCAmelCase__ = ('BertTokenizer', 'BertTokenizerFast')

    def __init__( self , lowercase=None , lowercase=None , **lowercase ) -> str:
        # Deprecation shim: accept `feature_extractor` as an alias for
        # `image_processor`, warn, and fall back to it.
        lowerCamelCase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowercase , )
            lowerCamelCase_ = kwargs.pop("feature_extractor" )
        lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(lowercase , lowercase )
        lowerCamelCase_ = self.image_processor

    def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = False , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ) -> Optional[int]:
        # Tokenize `text` and/or preprocess `images`; at least one must be given.
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            lowerCamelCase_ = self.tokenizer(
                text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
        if images is not None:
            lowerCamelCase_ = self.image_processor(
                lowercase , return_image_mask=lowercase , return_codebook_pixels=lowercase , return_tensors=lowercase , **lowercase , )
        # Merge image features into the text encoding when both are present.
        if text is not None and images is not None:
            encoding.update(lowercase )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )

    def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> Optional[int]:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*lowercase , **lowercase )

    def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> List[str]:
        # Forward to the tokenizer's decode.
        # NOTE(review): same scrambled name as the method above -- this
        # binding shadows it.
        return self.tokenizer.decode(*lowercase , **lowercase )

    @property
    def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
        # Order-preserving union of tokenizer and image-processor input names.
        lowerCamelCase_ = self.tokenizer.model_input_names
        lowerCamelCase_ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
        # Deprecated alias; warns and returns the image-processor class name.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase , )
        return self.image_processor_class

    @property
    def SCREAMING_SNAKE_CASE_( self ) -> Any:
        # Deprecated alias; warns and returns the image processor itself.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase , )
        return self.image_processor
| 19 |
def _validate_point(point):
    """Raise if *point* is not a non-empty list of numbers.

    Raises:
        TypeError: when *point* is not a list or holds a non-numeric item.
        ValueError: when *point* is empty/falsy.
    """
    # NOTE(review): the scrambled original named this function the same as
    # the distance function below (so the `_validate_point` calls failed with
    # NameError) and tested `isinstance(_a, _a)`; restored to a list check.
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def lowerCamelCase__(point_a, point_b):
    """Return the Manhattan (L1) distance between two same-length points.

    NOTE(review): the original defined this function twice under one name
    (the first copy was dead code) with both parameters called ``_a``, a
    SyntaxError that also zipped a point against itself; parameters restored.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import scaffold for the RoFormer model family.
#
# NOTE(review): machine-scrambled chunk.  Each availability branch below
# rebinds one throwaway name (`lowerCAmelCase__`) instead of inserting keys
# into an `_import_structure` dict, and the `_LazyModule(...)` call at the
# bottom reads `_import_structure`, which this scrambled form never defines.
# Code left byte-identical pending a confirmed upstream source.
lowerCAmelCase__ : Optional[Any] = {
    '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
    '''tokenization_roformer''': ['''RoFormerTokenizer'''],
}

# Fast tokenizer only when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : Any = ['''RoFormerTokenizerFast''']

# PyTorch model symbols, only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : Any = [
        '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoFormerForCausalLM''',
        '''RoFormerForMaskedLM''',
        '''RoFormerForMultipleChoice''',
        '''RoFormerForQuestionAnswering''',
        '''RoFormerForSequenceClassification''',
        '''RoFormerForTokenClassification''',
        '''RoFormerLayer''',
        '''RoFormerModel''',
        '''RoFormerPreTrainedModel''',
        '''load_tf_weights_in_roformer''',
    ]

# TensorFlow model symbols, only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : Optional[Any] = [
        '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFRoFormerForCausalLM''',
        '''TFRoFormerForMaskedLM''',
        '''TFRoFormerForMultipleChoice''',
        '''TFRoFormerForQuestionAnswering''',
        '''TFRoFormerForSequenceClassification''',
        '''TFRoFormerForTokenClassification''',
        '''TFRoFormerLayer''',
        '''TFRoFormerModel''',
        '''TFRoFormerPreTrainedModel''',
    ]

# Flax model symbols, only when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ : str = [
        '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FlaxRoFormerForMaskedLM''',
        '''FlaxRoFormerForMultipleChoice''',
        '''FlaxRoFormerForQuestionAnswering''',
        '''FlaxRoFormerForSequenceClassification''',
        '''FlaxRoFormerForTokenClassification''',
        '''FlaxRoFormerModel''',
        '''FlaxRoFormerPreTrainedModel''',
    ]

# Under static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

# At runtime, replace this module with a lazy loader.
else:
    import sys

    lowerCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): the archive map below rebinds the same scrambled name `a_`,
# clobbering the logger -- these were presumably `logger` and
# `VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm against upstream.
a_ = logging.get_logger(__name__)

# Pretrained checkpoint id -> hosted config URL.
a_ = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCamelCase(__A):
    """Configuration holder for ViT-MSN models.

    Stores the architecture hyper-parameters on the instance so the base
    config class can serialize them.

    NOTE(review): the scrambled original named all thirteen parameters ``a``
    (a SyntaxError) and bound every value to one throwaway name instead of
    ``self.*``; parameter names are restored from the right-hand sides the
    body reads.  The base class and the ``'vit_msn'`` attribute keep their
    scrambled identifiers (the attribute is presumably ``model_type`` --
    confirm upstream).
    """

    lowerCamelCase__ = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Persist every hyper-parameter on the instance.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str=""):
    """Counter-based check: at most one character may have an odd count.

    Spaces are ignored and the comparison is case-insensitive.
    """
    return sum(c % 2 for c in Counter(input_str.replace(' ', '').lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str=""):
    """Manual frequency-dict check equivalent to the Counter version above.

    NOTE(review): the scrambled original gave all three functions one name
    while ``benchmark``/``__main__`` call these restored names.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(' ', '').lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str=""):
    """Print answer and timing for both implementations on *input_str*."""
    print('\nFor string = ', input_str, ':')
    print(
        '> can_string_be_rearranged_as_palindrome_counter()',
        '\tans =',
        can_string_be_rearranged_as_palindrome_counter(input_str),
        '\ttime =',
        timeit(
            'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)',
            setup='import __main__ as z',
        ),
        'seconds',
    )
    print(
        '> can_string_be_rearranged_as_palindrome()',
        '\tans =',
        can_string_be_rearranged_as_palindrome(input_str),
        '\ttime =',
        timeit(
            'z.can_string_be_rearranged_as_palindrome(z.check_str)',
            setup='import __main__ as z',
        ),
        'seconds',
    )


if __name__ == "__main__":
    check_str = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 71 |
import base64


def ascii85_encode(plaintext):
    """UTF-8 encode *plaintext* and return its Ascii85 representation (bytes).

    NOTE(review): the scrambled original imported the non-existent module
    ``baseaa`` and called ``aaaencode`` -- digit-scrambled ``base64`` /
    ``a85encode`` -- and read an undefined name ``string``; restored here.
    """
    return base64.a85encode(plaintext.encode("utf-8"))


def ascii85_decode(encoded):
    """Decode Ascii85 *encoded* data back to a UTF-8 string."""
    return base64.a85decode(encoded).decode("utf-8")


# Backward-compatible alias: the original bound both functions to this one
# scrambled name, so only the decoder was ever reachable under it.
lowerCamelCase__ = ascii85_decode

if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Project Euler problem 30: numbers equal to the sum of fifth powers of their digits."""

# Pre-computed fifth power of each decimal digit, keyed by the digit character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}


def digits_fifth_powers_sum(number):
    """Return the sum of the fifth powers of the decimal digits of *number*.

    NOTE(review): restored names -- the scrambled original bound the table
    and both functions to names their callers never used.
    """
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution():
    """Sum every number in [1000, 1000000) equal to its digits' fifth-power sum."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 45 |
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are exempt from the stale-bot actions
# in main() (which lowercases the issue's labels before comparing).
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]

# NOTE(review): the original bound this list to a scrambled throwaway name
# while main() reads LABELS_TO_EXEMPT; the alias keeps the old name resolvable.
a_ = LABELS_TO_EXEMPT
def main():
    """Close or nag stale issues on huggingface/transformers.

    Issues inactive past the thresholds are closed when the last comment came
    from the bot, otherwise they receive a stale-warning comment.  Issues
    with an exempt label are skipped in both branches.

    NOTE(review): function name restored from the ``__main__`` call site;
    locals restored from the names the body reads (``g``, ``repo``,
    ``open_issues``, ``comments``, ``last_comment``).
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first -- `reverse=True` presumed from comments[0]
        # being treated as the latest; confirm upstream.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")


if __name__ == "__main__":
    main()
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __UpperCAmelCase (__A ):
    """Value-guided sampling pipeline: a diffusion planner whose denoising
    steps are nudged by gradients of a learned value function.

    NOTE(review): machine-scrambled chunk -- ``__init__`` repeats one
    parameter name four times (a SyntaxError) and every assignment target
    was collapsed to ``_SCREAMING_SNAKE_CASE``, so later reads resolve to
    the last binding only.  Code left byte-identical; confirm against the
    upstream value-guided RL pipeline before relying on behaviour.
    """

    def __init__( self: Optional[Any] , UpperCAmelCase_: UNetaDModel , UpperCAmelCase_: UNetaDModel , UpperCAmelCase_: DDPMScheduler , UpperCAmelCase_: Dict , ):
        '''Store the value function, diffusion UNet, scheduler and env, and
        precompute per-key means/stds of the env dataset for normalization.'''
        super().__init__()
        _SCREAMING_SNAKE_CASE = value_function
        _SCREAMING_SNAKE_CASE = unet
        _SCREAMING_SNAKE_CASE = scheduler
        _SCREAMING_SNAKE_CASE = env
        _SCREAMING_SNAKE_CASE = env.get_dataset()
        _SCREAMING_SNAKE_CASE = {}
        for key in self.data.keys():
            try:
                _SCREAMING_SNAKE_CASE = self.data[key].mean()
            except: # noqa: E722
                pass
        _SCREAMING_SNAKE_CASE = {}
        for key in self.data.keys():
            try:
                _SCREAMING_SNAKE_CASE = self.data[key].std()
            except: # noqa: E722
                pass
        _SCREAMING_SNAKE_CASE = env.observation_space.shape[0]
        _SCREAMING_SNAKE_CASE = env.action_space.shape[0]

    def UpperCamelCase ( self: Dict , UpperCAmelCase_: Any , UpperCAmelCase_: Optional[int] ):
        '''Normalize x_in with the stored mean/std for *key*.'''
        return (x_in - self.means[key]) / self.stds[key]

    def UpperCamelCase ( self: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Optional[int] ):
        '''Invert the normalization for *key*.
        NOTE(review): same scrambled method name as above -- shadows it.'''
        return x_in * self.stds[key] + self.means[key]

    def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int ):
        '''Move dicts/tensors/arrays onto the UNet's device as torch tensors.'''
        if type(UpperCAmelCase_ ) is dict:
            return {k: self.to_torch(UpperCAmelCase_ ) for k, v in x_in.items()}
        elif torch.is_tensor(UpperCAmelCase_ ):
            return x_in.to(self.unet.device )
        return torch.tensor(UpperCAmelCase_ , device=self.unet.device )

    def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str] ):
        '''Overwrite conditioned entries of the trajectory with their values.'''
        for key, val in cond.items():
            _SCREAMING_SNAKE_CASE = val.clone()
        return x_in

    def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: Any , UpperCAmelCase_: int ):
        '''Run the guided reverse-diffusion loop: at each timestep, ascend the
        value-function gradient, re-apply conditions, then take a scheduler step.'''
        _SCREAMING_SNAKE_CASE = x.shape[0]
        _SCREAMING_SNAKE_CASE = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            _SCREAMING_SNAKE_CASE = torch.full((batch_size,) , UpperCAmelCase_ , device=self.unet.device , dtype=torch.long )
            for _ in range(UpperCAmelCase_ ):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    _SCREAMING_SNAKE_CASE = self.value_function(x.permute(0 , 2 , 1 ) , UpperCAmelCase_ ).sample
                    _SCREAMING_SNAKE_CASE = torch.autograd.grad([y.sum()] , [x] )[0]

                    _SCREAMING_SNAKE_CASE = self.scheduler._get_variance(UpperCAmelCase_ )
                    _SCREAMING_SNAKE_CASE = torch.exp(0.5 * posterior_variance )
                    # Scale the gradient by the posterior std-dev.
                    _SCREAMING_SNAKE_CASE = model_std * grad
                _SCREAMING_SNAKE_CASE = 0
                _SCREAMING_SNAKE_CASE = x.detach()
                _SCREAMING_SNAKE_CASE = x + scale * grad
                _SCREAMING_SNAKE_CASE = self.reset_xa(UpperCAmelCase_ , UpperCAmelCase_ , self.action_dim )
            _SCREAMING_SNAKE_CASE = self.unet(x.permute(0 , 2 , 1 ) , UpperCAmelCase_ ).sample.permute(0 , 2 , 1 )

            # TODO: verify deprecation of this kwarg
            _SCREAMING_SNAKE_CASE = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , predict_epsilon=UpperCAmelCase_ )["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            _SCREAMING_SNAKE_CASE = self.reset_xa(UpperCAmelCase_ , UpperCAmelCase_ , self.action_dim )
            _SCREAMING_SNAKE_CASE = self.to_torch(UpperCAmelCase_ )
        return x, y

    def __call__( self: Dict , UpperCAmelCase_: int , UpperCAmelCase_: int=64 , UpperCAmelCase_: Union[str, Any]=32 , UpperCAmelCase_: str=2 , UpperCAmelCase_: Tuple=0.1 ):
        '''Plan from an observation: normalize, diffuse a batch of candidate
        trajectories, rank them by value, and return the chosen first action.'''
        _SCREAMING_SNAKE_CASE = self.normalize(UpperCAmelCase_ , """observations""" )
        _SCREAMING_SNAKE_CASE = obs[None].repeat(UpperCAmelCase_ , axis=0 )
        _SCREAMING_SNAKE_CASE = {0: self.to_torch(UpperCAmelCase_ )}
        _SCREAMING_SNAKE_CASE = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        _SCREAMING_SNAKE_CASE = randn_tensor(UpperCAmelCase_ , device=self.unet.device )
        _SCREAMING_SNAKE_CASE = self.reset_xa(UpperCAmelCase_ , UpperCAmelCase_ , self.action_dim )
        _SCREAMING_SNAKE_CASE = self.to_torch(UpperCAmelCase_ )

        # run the diffusion process
        _SCREAMING_SNAKE_CASE = self.run_diffusion(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )

        # sort output trajectories by value
        _SCREAMING_SNAKE_CASE = y.argsort(0 , descending=UpperCAmelCase_ ).squeeze()
        _SCREAMING_SNAKE_CASE = x[sorted_idx]
        _SCREAMING_SNAKE_CASE = sorted_values[:, :, : self.action_dim]
        _SCREAMING_SNAKE_CASE = actions.detach().cpu().numpy()
        _SCREAMING_SNAKE_CASE = self.de_normalize(UpperCAmelCase_ , key="""actions""" )

        # select the action with the highest value
        if y is not None:
            _SCREAMING_SNAKE_CASE = 0
        else:
            # if we didn't run value guiding, select a random action
            _SCREAMING_SNAKE_CASE = np.random.randint(0 , UpperCAmelCase_ )

        _SCREAMING_SNAKE_CASE = denorm_actions[selected_index, 0]
        return denorm_actions
| 306 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
def lowerCamelCase__(videos):
    """Normalize *videos* into a batch: a list of videos, each a list of frames.

    Accepts a single image, a list of frames, or a list of videos, wrapping
    as needed; anything else raises ValueError.

    NOTE(review): the scrambled original named the parameter ``_a`` while the
    body reads ``videos`` (a NameError); the parameter name is restored.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video (list of frames) -> batch of one.
        return [videos]
    elif is_valid_image(videos):
        # A single frame -> wrap twice.
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class _UpperCamelCase ( __A ):
    """Video image processor: resize, center-crop, rescale (with optional
    offset) and normalize batches of video frames.

    NOTE(review): machine-scrambled chunk -- every method signature repeats
    the parameter name ``a`` (a SyntaxError) while bodies read the real names
    (``size``, ``crop_size``, ``image_mean`` ...); a trailing dataset
    artifact on the last line was stripped.  Code otherwise byte-identical.
    """

    lowerCamelCase__ =['pixel_values']

    def __init__( self : Optional[Any] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Tuple , ) -> None:
        """Store the default preprocessing configuration."""
        super().__init__(**a )
        SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {"shortest_edge": 256}
        SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
        SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224}
        SCREAMING_SNAKE_CASE : str = get_size_dict(a , param_name="crop_size" )
        SCREAMING_SNAKE_CASE : Dict = do_resize
        SCREAMING_SNAKE_CASE : List[Any] = size
        SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop
        SCREAMING_SNAKE_CASE : int = crop_size
        SCREAMING_SNAKE_CASE : int = resample
        SCREAMING_SNAKE_CASE : Any = do_rescale
        SCREAMING_SNAKE_CASE : int = rescale_factor
        SCREAMING_SNAKE_CASE : Tuple = offset
        SCREAMING_SNAKE_CASE : str = do_normalize
        SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __UpperCamelCase ( self : Optional[Any] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) -> np.ndarray:
        """Resize a frame either by shortest edge or to an exact height/width."""
        SCREAMING_SNAKE_CASE : Tuple = get_size_dict(a , default_to_square=a )
        if "shortest_edge" in size:
            SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(a , size["shortest_edge"] , default_to_square=a )
        elif "height" in size and "width" in size:
            SCREAMING_SNAKE_CASE : Dict = (size["height"], size["width"])
        else:
            raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(a , size=a , resample=a , data_format=a , **a )

    def __UpperCamelCase ( self : List[str] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ) -> np.ndarray:
        """Center-crop a frame to size['height'] x size['width']."""
        SCREAMING_SNAKE_CASE : str = get_size_dict(a )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )

    def __UpperCamelCase ( self : List[Any] , a : np.ndarray , a : Union[int, float] , a : bool = True , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , ) -> Optional[Any]:
        """Rescale pixel values; with offset, shift so the result is centered
        around zero before scaling."""
        SCREAMING_SNAKE_CASE : int = image.astype(np.floataa )
        if offset:
            SCREAMING_SNAKE_CASE : Union[str, Any] = image - (scale / 2)
        return rescale(a , scale=a , data_format=a , **a )

    def __UpperCamelCase ( self : int , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ) -> np.ndarray:
        """Normalize a frame with the given per-channel mean and std."""
        return normalize(a , mean=a , std=a , data_format=a , **a )

    def __UpperCamelCase ( self : Tuple , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
        """Apply the configured transforms to one frame.

        NOTE(review): the first guard relies on operator precedence --
        ``(do_resize and size is None) or resample is None`` -- confirm the
        intended grouping.
        """
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )

        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE : List[str] = to_numpy_array(a )

        if do_resize:
            SCREAMING_SNAKE_CASE : Optional[Any] = self.resize(image=a , size=a , resample=a )

        if do_center_crop:
            SCREAMING_SNAKE_CASE : Union[str, Any] = self.center_crop(a , size=a )

        if do_rescale:
            SCREAMING_SNAKE_CASE : Any = self.rescale(image=a , scale=a , offset=a )

        if do_normalize:
            SCREAMING_SNAKE_CASE : Tuple = self.normalize(image=a , mean=a , std=a )

        SCREAMING_SNAKE_CASE : Optional[int] = to_channel_dimension_format(a , a )
        return image

    def __UpperCamelCase ( self : Dict , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : float = None , a : bool = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Tuple , ) -> PIL.Image.Image:
        """Preprocess a batch of videos frame-by-frame and return pixel_values."""
        SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE : Union[str, Any] = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        SCREAMING_SNAKE_CASE : str = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE : Optional[Any] = offset if offset is not None else self.offset
        SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE : int = size if size is not None else self.size
        SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(a , default_to_square=a )
        SCREAMING_SNAKE_CASE : Tuple = crop_size if crop_size is not None else self.crop_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(a , param_name="crop_size" )

        if not valid_images(a ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        SCREAMING_SNAKE_CASE : Optional[int] = make_batched(a )

        SCREAMING_SNAKE_CASE : List[Any] = [
            [
                self._preprocess_image(
                    image=a , do_resize=a , size=a , resample=a , do_center_crop=a , crop_size=a , do_rescale=a , rescale_factor=a , offset=a , do_normalize=a , image_mean=a , image_std=a , data_format=a , )
                for img in video
            ]
            for video in videos
        ]

        SCREAMING_SNAKE_CASE : Optional[int] = {"pixel_values": videos}
        return BatchFeature(data=a , tensor_type=a )
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

# Model-type -> Flax model class name, per task. These name maps are consumed by
# `_LazyAutoMapping` below, which resolves the strings to classes lazily.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy mappings: config class -> model class, resolved on first access.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    """Auto class that instantiates a base Flax model from a config or checkpoint."""

    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    """Auto class for Flax models with a pre-training head."""

    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    """Auto class for Flax causal language models."""

    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    """Auto class for Flax masked language models."""

    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    """Auto class for Flax sequence-to-sequence language models."""

    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    """Auto class for Flax sequence-classification models."""

    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    """Auto class for Flax extractive question-answering models."""

    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    """Auto class for Flax token-classification models."""

    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    """Auto class for Flax multiple-choice models."""

    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    """Auto class for Flax next-sentence-prediction models."""

    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    """Auto class for Flax image-classification models."""

    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    """Auto class for Flax vision-to-text models."""

    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    """Auto class for Flax speech-to-text sequence-to-sequence models."""

    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 156 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) ConvBERT tokenizer, based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-build the backend normalizer if its saved state disagrees with the
        # requested lowercasing / accent-stripping / Chinese-character handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding [CLS]/[SEP]: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary to `save_directory`; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class A(unittest.TestCase):
    """Smoke tests: the accelerate debug launcher can run the bundled test scripts on CPU."""

    def test_cpu(self) -> None:
        # Launch the end-to-end training test script through the debug launcher.
        debug_launcher(test_script.main)

    def test_ops(self) -> None:
        # Launch the distributed-ops test script through the debug launcher.
        debug_launcher(test_ops.main)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """Pytest hook: register the shared diffusers CLI options (e.g. --make-reports)."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Pytest hook: write detailed report files when --make-reports=<id> is passed."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    """Tests for standalone TF generation utilities."""

    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    """TF-specific generation integration tests: SavedModel export, EOS handling, kwarg filtering."""

    # Framework hooks consumed by GenerationIntegrationTestsMixin; gated on TF availability,
    # just like the imports above.
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # Exported generate() must work for a fixed input length and a variable batch size.
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # Exported generate() must work for a fixed batch size and a variable input length.
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # A model whose tokenizer is a TF graph op must be exportable end-to-end as a Keras model.
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # An int eos_token_id and a list containing it must stop generation at the same length.
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        # Extra model kwargs must be filtered against the encoder signature before being forwarded.
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A model accepting an unused "foo" kwarg must produce the same output with or without it.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 273 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Fixture holder: stores the configuration used to build image-processing test inputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        # Avoid mutable default arguments: materialize the default stats here.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises ViTImageProcessor with the EfficientFormer fixture configuration."""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        # Intentionally a no-op for this processor.
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import comet # From: unbabel-comet
import torch
import datasets
__magic_name__: Optional[Any] = datasets.logging.get_logger(__name__)
__magic_name__: Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
__magic_name__: List[str] = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
__magic_name__: List[Any] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case__(datasets.Metric):
    """COMET machine-translation metric backed by the Unbabel `comet` package."""

    def _info(self):
        """Declare the metric's metadata and expected string input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                # NOTE(review): trailing "6" looks like a typo in this URL — confirm upstream
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Download the COMET checkpoint selected via `config_name` and build the scorer."""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score each (source, hypothesis, reference) triple with COMET.

        Returns:
            dict with the corpus-level ``mean_score`` and per-example ``scores``.
        """
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        # Re-shape the column dict into a list of per-example dicts, as the scorer expects.
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 342 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase__(_a, _tokenizer=None):
    """Tokenize one dataset row and record its character/token compression ratio.

    Args:
        _a: a dataset row with a ``"content"`` text field.
        _tokenizer: optional tokenizer override (defaults to the module-level
            ``tokenizer``); makes the function testable in isolation.

    Returns:
        dict with ``input_ids`` and ``ratio_char_token``.
    """
    tok = _tokenizer if _tokenizer is not None else tokenizer
    output = {}
    # truncation=False: keep full documents so the ratio reflects the whole text
    output["input_ids"] = tok(_a["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(_a["content"]) / len(output["input_ids"])
    return output
# --- Script entry point: tokenize a dataset with multiple workers and push it to the Hub.
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
# Module-level tokenizer: read by the tokenization function above.
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
    lowerCamelCase__,  # per-example tokenization function defined above
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
"""Lazy import structure for the XGLM model family.

Each optional backend (sentencepiece, tokenizers, torch, flax, tf) only
contributes its symbols to ``_import_structure`` when the dependency is
available; the module itself is then replaced by a ``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xglm'] = [
        'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XGLMForCausalLM',
        'XGLMModel',
        'XGLMPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_xglm'] = [
        'FlaxXGLMForCausalLM',
        'FlaxXGLMModel',
        'FlaxXGLMPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xglm'] = [
        'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXGLMForCausalLM',
        'TFXGLMModel',
        'TFXGLMPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in place of this one so attribute access
    # triggers the deferred imports declared in `_import_structure`.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 80 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; referenced as `logger` by the conversion routines below.
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    """Build a `DetrConfig` (HF ResNet backbone, no timm) for the given checkpoint name.

    Args:
        model_name: checkpoint name containing "resnet-50" or "resnet-101",
            optionally "panoptic".

    Returns:
        (config, is_panoptic): the model config and whether the checkpoint is
        a panoptic-segmentation variant.
    """
    # initialize config with the matching ResNet backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    """Map original DETR state-dict keys to their HF `DetrForObjectDetection` names.

    Args:
        config: a `DetrConfig` whose ``backbone_config.depths`` and
            ``encoder_layers`` describe the architecture.

    Returns:
        list of ``(original_key, hf_key)`` tuples, in the same order the
        original hand-written listing produced them.
    """
    rename_keys = []
    bn_params = ("weight", "bias", "running_mean", "running_var")

    # stem: first convolution + its batch norm
    rename_keys.append(
        ("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")
    )
    for param in bn_params:
        rename_keys.append(
            (f"backbone.0.body.bn1.{param}", f"backbone.conv_encoder.model.embedder.embedder.normalization.{param}")
        )

    # backbone stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            src_layer = f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}"
            dest_layer = f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}"
            # shortcut (downsample) branch exists only on the first block of each stage
            if layer_idx == 0:
                rename_keys.append((f"{src_layer}.downsample.0.weight", f"{dest_layer}.shortcut.convolution.weight"))
                for param in bn_params:
                    rename_keys.append(
                        (f"{src_layer}.downsample.1.{param}", f"{dest_layer}.shortcut.normalization.{param}")
                    )
            # the 3 conv/bn pairs of the bottleneck block
            for i in range(3):
                rename_keys.append((f"{src_layer}.conv{i + 1}.weight", f"{dest_layer}.layer.{i}.convolution.weight"))
                for param in bn_params:
                    rename_keys.append(
                        (f"{src_layer}.bn{i + 1}.{param}", f"{dest_layer}.layer.{i}.normalization.{param}")
                    )

    # transformer encoder/decoder layers
    for i in range(config.encoder_layers):
        # encoder: output projection, 2 feedforward linears and 2 layernorms
        for src, dest in (
            ("self_attn.out_proj", "self_attn.out_proj"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "final_layer_norm"),
        ):
            for param in ("weight", "bias"):
                rename_keys.append(
                    (f"transformer.encoder.layers.{i}.{src}.{param}", f"encoder.layers.{i}.{dest}.{param}")
                )
        # decoder: 2 attention output projections, 2 feedforward linears and 3 layernorms
        for src, dest in (
            ("self_attn.out_proj", "self_attn.out_proj"),
            ("multihead_attn.out_proj", "encoder_attn.out_proj"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "encoder_attn_layer_norm"),
            ("norm3", "final_layer_norm"),
        ):
            for param in ("weight", "bias"):
                rename_keys.append(
                    (f"transformer.decoder.layers.{i}.{src}.{param}", f"decoder.layers.{i}.{dest}.{param}")
                )

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )
    return rename_keys
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split fused q/k/v input-projection tensors into separate HF projection keys.

    PyTorch's MultiHeadAttention stores query/key/value projections as a single
    ``in_proj_weight``/``in_proj_bias``; HF DETR uses separate q/k/v projections
    of 256 rows each, so every fused tensor is popped and split in (q, k, v) order.
    Mutates ``state_dict`` in place.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download the standard COCO cats test image used for conversion sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True keeps the raw file object open so PIL can decode it lazily
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original torch-hub DETR checkpoint to the HF format.

    Loads the original model, renames/splits its weights into the HF layout,
    verifies the HF model reproduces the original outputs on a test image, and
    optionally saves the result and/or pushes it to the hub.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                # these heads keep their original names
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    image_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=image_format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1E-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1E-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1E-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # CLI: pick a checkpoint, optionally dump and/or push the converted model.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='detr-resnet-50',
        type=str,
        choices=['detr-resnet-50', 'detr-resnet-101'],
        help='Name of the DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """Fixture holder that produces an ImageGPT image-processor config for the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        """Return kwargs suitable for constructing an `ImageGPTImageProcessor`."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
                    [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for `ImageGPTImageProcessor` configuration and (de)serialization."""

    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, '''clusters'''))
        self.assertTrue(hasattr(image_processor, '''do_resize'''))
        self.assertTrue(hasattr(image_processor, '''size'''))
        self.assertTrue(hasattr(image_processor, '''do_normalize'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18})
        # overriding a kwarg at load time must win over the serialized value
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                # numpy arrays round-trip through JSON as nested lists
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''image_processor.json''')
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip('''ImageGPT requires clusters at initialization''')
    def test_init_without_params(self):
        pass
def prepare_images():
    """Load two fixture images from the hub dataset used by the slow integration test."""
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''', split='''test''')
    image1 = Image.open(dataset[4]['''file'''])
    image2 = Image.open(dataset[5]['''file'''])
    return [image1, image2]
@require_vision
@require_torch
class ImageGPTIntegrationTest(unittest.TestCase):
    """Slow integration test: checks pixel clustering against known token ids."""

    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors='''pt''')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors='''pt''')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 259 |
import os
def solution(filename=None):
    """Project Euler 22: total of alphabetical name scores.

    Reads a comma-separated, double-quoted list of names, sorts it, and sums
    (1-based position) * (sum of letter values, A=1).

    Args:
        filename: optional path to the names file; defaults to the bundled
            ``p022_names.txt`` next to this script.

    Returns:
        The total score as an int.
    """
    if filename is None:
        filename = os.path.dirname(__file__) + "/p022_names.txt"
    with open(filename) as file:
        names = str(file.readlines()[0])
    names = names.replace("\"", "").split(",")
    names.sort()
    total_score = 0
    for i, name in enumerate(names):
        # letter score: ord("A") == 65, so subtracting 64 maps A->1 ... Z->26
        name_score = sum(ord(letter) - 64 for letter in name)
        total_score += (i + 1) * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# PyTorch < 1.11 uses a different `torch.onnx.export` signature (see `onnx_export` below).
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(
    model,
    model_args,
    output_path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, handling both torch export signatures.

    Args:
        model: the torch module to export.
        model_args: example inputs passed through the model while tracing.
        output_path: destination `Path` of the .onnx file (parents are created).
        ordered_input_names: input names in positional order.
        output_names: output tensor names.
        dynamic_axes: mapping of input name -> dynamic axis spec.
        opset: ONNX opset version.
        use_external_data_format: store weights outside the proto (torch<1.11 only).
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path, output_path, opset, fp16=False):
    """Convert a Stable Diffusion checkpoint into a directory of ONNX models.

    Exports the text encoder, UNet, VAE encoder/decoder and (if present) the
    safety checker, then rebuilds and save-loads an `OnnxStableDiffusionPipeline`
    as a sanity check.

    Args:
        model_path: local directory or hub id of the `diffusers` checkpoint.
        output_path: destination directory for the ONNX pipeline.
        opset: ONNX opset version to export with.
        fp16: export in float16 (requires CUDA).
    """
    dtype = torch.floataa if fp16 else torch.floataa
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.intaa)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights can exceed the 2 GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    # final sanity check: the exported pipeline must be loadable again
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    # CLI wrapper around `convert_models`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_path''',
        type=str,
        required=True,
        help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
    )
    parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--opset''',
        default=1_4,
        type=int,
        help='''The version of the ONNX operator set to use.''',
    )
    parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    # argparse exposes `--fp16` as `args.fp16` (the original `args.fpaa` raised AttributeError)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 19 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func, y0, x0, step_size, x_end):
    """Approximate the solution of y' = ode_func(x, y), y(x0) = y0 with the
    forward (explicit) Euler method.

    BUG FIX: the obfuscated signature declared five parameters all named `_a`
    (a SyntaxError); the body's references to ode_func/ya/xa/step_size/x_end
    fix the intended parameter list.

    :param ode_func: callable f(x, y) giving the derivative at (x, y)
    :param y0: initial value y(x0)
    :param x0: initial abscissa
    :param step_size: Euler step h (> 0)
    :param x_end: right end of the integration interval
    :return: numpy array of the n+1 successive approximations, y[0] == y0
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __snake_case(unittest.TestCase):
    """Tests for BlipProcessor: save/load round-trips and the wiring between
    tokenizer and image processor.

    BUG FIX: the obfuscated version named every method ``__a`` (each def
    shadowing the previous one) while the bodies call ``self.get_tokenizer()``,
    ``self.get_image_processor()`` and ``self.prepare_image_inputs()`` — the
    original method names are restored from those call sites.
    """

    def setUp(self):
        # Persist a processor to a temp dir so from_pretrained can reload it.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image (channels-first source
        array moved to channels-last).

        BUG FIX: ``dtype=np.uinta`` is not a NumPy attribute → ``np.uint8``.
        """
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        # NOTE(review): obfuscation erased the kwargs' values; False is the
        # conventional choice for a do_normalize override — confirm upstream.
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        # NOTE(review): Blip's processor drops token_type_ids — compare
        # against the tokenizer called without them.
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
def or_gate(input_1, input_2):
    """Logical OR gate: return 1 if at least one input is 1, else 0.

    BUG FIX: both the gate and its test were defined under the same obfuscated
    name (the second def shadowed the first) while the test and the __main__
    guard called the undefined ``or_gate`` — the intended names are restored.
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate():
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
import operator as op
def solve(post_fix):
    """Evaluate a space-split postfix (RPN) expression of non-negative integer
    operands, printing each push/pop step as a table, and return the result.

    BUG FIX: the obfuscated version declared ``lambda a_, a_`` (duplicate
    parameters — a SyntaxError) while its body used ``x``/``y``, iterated an
    undefined ``post_fix``, and joined/appended the placeholder ``_a``; the
    __main__ guard also called the undefined name ``solve``. All restored.

    :param post_fix: list of tokens, e.g. ["2", "3", "+"]
    :return: integer result of the expression
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division truncating toward zero
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is an operand
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop right operand
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop left operand
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
# Universal gas constant in J / (mol * K).
# BUG FIX: this value was bound to the obfuscated name `a_` while the function
# below read the undefined `UNIVERSAL_GAS_CONSTANT`; likewise the __main__
# guard called the undefined `rms_speed_of_molecule`.
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature, molar_mass):
    """Return the root-mean-square speed (m/s) of a gas molecule,
    vrms = sqrt(3RT/M).

    :param temperature: absolute temperature in kelvin (must be >= 0)
    :param molar_mass: molar mass in kg/mol (must be > 0)
    :raises Exception: on a negative temperature or non-positive molar mass
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# (config class, model-with-LM-head class, tokenizer class) per model family.
# BUG FIX: this mapping was bound to the obfuscated name `lowercase_` while
# main() reads the undefined `MODEL_CLASSES`; the expected name is restored.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
}
def sanity_checks(args):
    """Validate the distillation CLI arguments before training starts.

    BUG FIX: the obfuscated name `lowercase` was rebound by three later defs,
    and main() calls the undefined `sanity_checks` — the name is restored.

    Checks, via assertions: MLM vs CLM loss weights are mutually consistent,
    the student/teacher type pairing is supported, and referenced files exist.
    :raises AssertionError: on any inconsistent combination.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Only distilbert-from-bert cross-architecture distillation is supported.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Stop gradient updates on the student's positional embeddings.

    BUG FIX: the obfuscation erased the assignment targets (`__a = False`)
    and the function name (main() calls the undefined
    `freeze_pos_embeddings`). Targets reconstructed — presumably the
    positional-embedding weight's requires_grad flag; confirm upstream.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """Stop gradient updates on the student's token-type embeddings
    (RoBERTa students only — see sanity_checks).

    BUG FIX: assignment target and function name erased by obfuscation;
    main() calls the undefined `freeze_token_type_embeddings`. Target
    reconstructed — presumably the token-type embedding weight's
    requires_grad flag; confirm upstream.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse CLI arguments, load teacher/student models and the binarized
    dataset, then run knowledge distillation via Distiller.

    NOTE(review): the obfuscated version destroyed most local bindings
    (`__a = ...`) and all argparse `type=` callables (`type=_a`); they are
    reconstructed here from the later uses visible in this function
    (args.*, MODEL_CLASSES tuple unpacking, tokenizer/student/teacher).
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
# Undirected demo graph used by the __main__ examples below.
# BUG FIX: this dict was bound to the obfuscated name `a_` while the __main__
# guard reads `demo_graph`; both BFS functions were also defined under a
# single obfuscated name (the second shadowing the first) while the guard
# calls `bfs_shortest_path` / `bfs_shortest_path_distance`. Names restored.
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}


def bfs_shortest_path(graph, start, goal):
    """Return one shortest path from `start` to `goal` as a list of nodes,
    or [] when no path exists (BFS over an adjacency-list graph).
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph, start, target):
    """Return the number of edges on a shortest path from `start` to `target`,
    0 when start == target, and -1 when unreachable or the input is invalid.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, 'G', 'D'))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    """Builds a tiny Nezha config plus random inputs and checks each task
    head's output shapes.

    BUG FIX: the obfuscated version named every method `UpperCamelCase`
    (each def shadowing the previous) and named the class `__UpperCAmelCase`,
    while the test class below calls `NezhaModelTester(self)` and
    `self.model_tester.prepare_config_and_inputs()`; names restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,  # accepted for signature parity; not stored (unused downstream)
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask,
        sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs, plus encoder tensors for
        cross-attention, with config switched to decoder mode."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        # NOTE(review): obfuscation erased this assignment target; decoder
        # mode is the presumed intent of this helper.
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase (__A ,__A ,__A ,unittest.TestCase ):
    """Common model tests for the Nezha architecture.

    NOTE(review): the three non-unittest bases all appear as ``__A`` in this
    source, which would raise "duplicate base class" at class creation; they
    were presumably three distinct tester mixins originally — confirm against
    the upstream file.  Class attributes and methods below were restored from
    their in-file call sites (``self.all_model_classes``,
    ``self._prepare_for_class``, ``self.model_tester`` …); the source named
    every method identically, so each later def shadowed the previous one.
    """

    # Every Nezha head the common tests should exercise.
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task -> model class mapping consumed by the pipeline tester mixin.
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin's input preparation with pretraining-style labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # NOTE(review): the mapping argument was an undefined placeholder in
            # this source; MODEL_FOR_PRETRAINING_MAPPING is assumed — confirm.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        # NOTE(review): config_class was an undefined placeholder in this
        # source; NezhaConfig is the natural candidate — confirm.
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # Same decoder check, but with no attention mask supplied.
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace a model on CPU, save/reload it, and run it on the GPU device."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the public ``sijunhe/nezha-cn-base`` checkpoint.

    In the source this class carried the same obfuscated name as the
    common-test class above (so it shadowed it), and both methods shared one
    name (so the second shadowed the first); distinct ``test_*`` names are
    restored so unittest discovers both tests.
    """

    @slow
    def test_inference_nezha_model(self):
        """Base-model forward pass: shape and a fixed hidden-state slice."""
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        """Masked-LM head: vocabulary-sized logits and a fixed slice."""
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase(unittest.TestCase):
    """Integration test for the TF mT5 checkpoint ``google/mt5-small``."""

    @slow
    def test_small_integration_test(self):
        """Check the LM loss of google/mt5-small against a reference score.

        The source called ``model(a, labels=a)`` with ``a`` undefined and
        clobbered every local into one variable; distinct locals are restored.
        The method is also given a ``test_`` prefix so unittest discovers it.
        (``TFAutoModelForSeqaSeqLM`` matches this file's import spelling.)
        """
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
# Maps num_image_embeds -> (H, W) output grid of the adaptive average pool,
# with H * W equal to the number of image embeddings.  Renamed from an
# obfuscated placeholder: it is consumed as POOLING_BREAKDOWN by ImageEncoder.
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    """Encode an image into ``num_image_embeds`` 2048-d feature vectors.

    A ResNet-152 backbone truncated before its avgpool/fc head, followed by an
    adaptive average pool whose output grid comes from POOLING_BREAKDOWN.
    The source's ``resnetaaa`` / ``AdaptiveAvgPoolad`` are digit-mangled forms
    of ``resnet152`` / ``AdaptiveAvgPool2d``, and its ``snake_case_`` method
    must be ``forward`` for ``nn.Module.__call__`` to dispatch to it.
    """

    def __init__(self, args):
        super().__init__()
        # NOTE(review): the pretrained flag was an undefined placeholder in the
        # source; True (load ImageNet weights) is assumed — confirm.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]  # drop avgpool + fc
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3xHxW -> Bx2048xhxw (backbone) -> pooled grid -> flattened -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    """Multimodal dataset over a JSON-lines file (MM-IMDB style records).

    Each record must carry ``"text"`` (caption/plot), ``"img"`` (image path
    relative to the jsonl file) and ``"label"`` (list of genre tags).
    The source repeated one parameter name in ``__init__`` (SyntaxError),
    clobbered the start/sentence/end unpack, and dropped the one-hot label
    assignment; all are restored here.  The base class ``__A`` is resolved to
    the ``Dataset`` imported at the top of this file.
    """

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # NOTE(review): add_special_tokens was an undefined placeholder in the
        # source; True is assumed so the CLS/SEP tokens below exist — confirm.
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        # Split off the special tokens that bracket the image embedding.
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        # Multi-hot target over the genre vocabulary.
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tag) for tag in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        """Count how often each tag appears across the whole dataset."""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    """Collate a list of JsonlDataset rows into padded, batched tensors.

    Returns ``(text_tensor, mask_tensor, img_tensor, img_start_token,
    img_end_token, tgt_tensor)``; text/mask are zero-padded to the longest
    sentence in the batch, with ``mask_tensor`` marking real tokens with 1.
    The source referenced an undefined ``_a`` and clobbered every tensor into
    one variable; distinct names are restored.
    """
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    """Return the 23 MM-IMDB genre labels, in canonical order.

    Renamed from an obfuscated identifier shared by three different functions
    in this file, which made each later definition shadow the previous one.
    """
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def get_image_transforms():
    """Return the torchvision preprocessing pipeline for MM-IMDB images.

    Resize-shorter-side-to-256, center-crop 224, convert to tensor, then
    normalize with the channel statistics hard-coded below (presumably the
    MM-IMDB training-set values — confirm provenance).  Renamed from an
    obfuscated identifier shared with two sibling functions (shadowing).
    """
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    :param successes: number of successful outcomes (non-negative int)
    :param trials: total number of independent trials (non-negative int)
    :param prob: per-trial success probability, strictly between 0 and 1
    :raises ValueError: on non-int counts, negative counts,
        ``successes > trials``, or ``prob`` outside (0, 1)

    The source repeated one parameter name three times (a SyntaxError) and
    defined the function under a name different from its call below; the
    name used at the call site is restored.  The type check now runs before
    the value comparisons so non-int inputs get the type error.
    """
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.