code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 66 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 66 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 66 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 66 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
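# Illustrative usage sketch (values chosen for illustration, not tied to any
# released checkpoint): unspecified arguments fall back to the defaults above.
# config = FNetConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2)
# assert config.tpu_short_seq_length == 512  # default kept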
| 66 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["PoolFormerFeatureExtractor"]
UpperCamelCase = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
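# What the `_LazyModule` pattern above buys (a sketch of the idea, not the
# actual transformers implementation): submodules are imported only when one
# of their exported names is first accessed, via a module-level __getattr__.
# import importlib
# def __getattr__(name):  # hypothetical minimal lazy loader
#     for module_name, exports in _import_structure.items():
#         if name in exports:
#             return getattr(importlib.import_module("." + module_name, __name__), name)
#     raise AttributeError(name)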
| 66 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
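# Minimal construction sketch (illustrative values; normally these fields are
# populated from the command line via HfArgumentParser):
# args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[128])
# print(args.model_names)       # ["bert-base-cased"]
# print(args.to_json_string())  # full config as indented JSON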
| 66 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def __a ( self ):
pass
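# Walk-through of the WordPiece behaviour asserted in test_full_tokenizer
# (the expected pieces follow directly from the toy vocab above):
# "UNwant\u00E9d" lowercases and strips the accent to "unwanted", which
# greedily matches "un" + "##want" + "##ed"; the ids [7, 4, 5, 10, 8, 9]
# are simply the positions of those pieces in the vocab list.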
| 66 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0)
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference)

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe("a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np").images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 66 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config
    encoder_config = DonutSwinConfig(image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128)
    # boolean flags below follow the standard Donut conversion setup (the
    # original literal values were lost in this copy)
    decoder_config = MBartConfig(is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
        model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True)
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # target key names below follow the usual Swin qkv-splitting pattern
            # (the original literal keys were lost in this copy)
            if "weight" in key:
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
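# Shape arithmetic behind the qkv split above (illustrative): the original
# checkpoint stores a fused matrix of shape (3 * dim, in_features); rows
# [:dim] are the query projection, [dim : 2 * dim] the key, and [-dim:] the
# value, so the three slices together account for the whole fused tensor.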
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
UpperCamelCase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
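# Example invocation (the script filename and output path are placeholders,
# not taken from the original file):
# python convert_donut_to_pytorch.py \
#     --model_name naver-clova-ix/donut-base \
#     --pytorch_dump_folder_path ./donut-base-converted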
| 66 |
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
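# Quick sanity check of str_eval on a short digit string (the expected value
# is plain arithmetic, not taken from the original file): the largest 4-digit
# window product in "12345678" is 5 * 6 * 7 * 8 == 1680.
# max(str_eval("12345678"[i : i + 4]) for i in range(5)) == 1680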
| 66 | 1 |
def solve_linear_system(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
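# Worked example (hypothetical input, verifiable by substitution): the system
# x + 2y = 3 and 2x + y = 3 has determinant 1*1 - 2*2 = -3, determinant_x =
# 3*1 - 3*2 = -3 and determinant_y = 1*3 - 2*3 = -3, so
# solve_linear_system([1, 2, 3], [2, 1, 3]) == (1.0, 1.0).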
| 66 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 1 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
_lowercase : Union[str, Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_lowercase : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowercase : List[Any] = image_classifier(_lowerCAmelCase , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_lowerCAmelCase ) , [
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}],
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}],
] , )
_lowercase : List[str] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
] , )
@require_tf
    def test_small_model_tf(self):
_lowercase : Dict = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
_lowercase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowercase : List[Any] = image_classifier(_lowerCAmelCase , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] , )
_lowercase : Optional[Any] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
[
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
{'score': 0.3_33, 'label': ANY(_lowerCAmelCase )},
],
] , )
@slow
@require_torch
    def test_large_model_pt(self):
_lowercase : Dict = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_lowercase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowercase : Union[str, Any] = image_classifier(_lowerCAmelCase , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
_lowercase : Dict = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
    def test_large_model_tf(self):
_lowercase : List[str] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
_lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowercase : int = image_classifier(_lowerCAmelCase , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
_lowercase : Any = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
| 66 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 66 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 66 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
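# Quick check on a small bound (the expected primes are a fact of arithmetic,
# not taken from the original file):
# prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]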
| 66 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self):
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    def default_onnx_opset(self):
        return 13
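# How the feed_forward_proj parsing above behaves (illustrative):
# T5Config(feed_forward_proj="gated-gelu") ends up with dense_act_fn ==
# "gelu_new" (backwards-compatibility branch) and is_gated_act == True,
# while the default "relu" yields dense_act_fn == "relu" and is_gated_act == False.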
| 66 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id)
    return config
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Any = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Any = dct.pop(SCREAMING_SNAKE_CASE )
_lowercase : Any = val
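# Standalone illustration of what the pop/assign helper above does for every
# (src, dest) pair produced by create_rename_keys (hypothetical one-entry dict):
demo_state = {'backbone.norm0.weight': 1}
for src, dest in [('backbone.norm0.weight', 'backbone.hidden_states_norms.stage1.weight')]:
    demo_state[dest] = demo_state.pop(src)
assert 'backbone.hidden_states_norms.stage1.weight' in demo_state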
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : List[Any] = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
_lowercase : Optional[int] = model_name_to_url[model_name]
_lowercase : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['state_dict']
_lowercase : Optional[int] = get_upernet_config(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowercase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE )
if "bn" in key:
_lowercase : Any = key.replace('bn' , 'batch_norm' )
_lowercase : Any = val
# rename keys
_lowercase : int = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify on image
_lowercase : Union[str, Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_lowercase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
_lowercase : Tuple = SegformerImageProcessor()
_lowercase : Tuple = processor(SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
_lowercase : Dict = model(SCREAMING_SNAKE_CASE )
if model_name == "upernet-convnext-tiny":
_lowercase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowercase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowercase : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowercase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66 | 1 |
from math import isqrt
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
return all(number % divisor != 0 for divisor in range(2 , isqrt(SCREAMING_SNAKE_CASE ) + 1 ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE = 10**6 ) -> int:
_lowercase : List[Any] = 0
_lowercase : Dict = 1
_lowercase : Any = 7
while prime_candidate < max_prime:
primes_count += is_prime(SCREAMING_SNAKE_CASE )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
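# The candidates visited above enumerate differences of consecutive cubes,
# (k + 1) ** 3 - k ** 3 == 3 * k * k + 3 * k + 1, i.e. 7, 19, 37, 61, 91, ...
# A standalone sanity check of that identity for the first few k:
for k in range(1, 6):
    assert (k + 1) ** 3 - k ** 3 == 3 * k * k + 3 * k + 1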
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = "upernet"
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=[1, 2, 3, 6] , _lowerCAmelCase=True , _lowerCAmelCase=0.4 , _lowerCAmelCase=3_8_4 , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=2_5_5 , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowercase : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[Any] = backbone_config.get('model_type' )
_lowercase : str = CONFIG_MAPPING[backbone_model_type]
_lowercase : Tuple = config_class.from_dict(_lowerCAmelCase )
_lowercase : Optional[Any] = backbone_config
_lowercase : Any = hidden_size
_lowercase : Any = initializer_range
_lowercase : Tuple = pool_scales
_lowercase : List[Any] = use_auxiliary_head
_lowercase : Optional[Any] = auxiliary_loss_weight
_lowercase : Any = auxiliary_in_channels
_lowercase : Any = auxiliary_channels
_lowercase : List[str] = auxiliary_num_convs
_lowercase : List[str] = auxiliary_concat_input
_lowercase : Tuple = loss_ignore_index
def __a ( self ):
_lowercase : str = copy.deepcopy(self.__dict__ )
_lowercase : Tuple = self.backbone_config.to_dict()
_lowercase : int = self.__class__.model_type
return output
| 66 | 1 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
_lowercase : int = []
_lowercase , _lowercase : List[Any] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right  # write the merged run back into place
return input_list
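# Standalone check of the two-way merge step above (hypothetical inputs):
demo_left, demo_right = [1, 4], [2, 3]
demo_merged = []
while demo_left and demo_right:
    demo_merged.append((demo_left if demo_left[0] <= demo_right[0] else demo_right).pop(0))
demo_merged += demo_left + demo_right
assert demo_merged == [1, 2, 3, 4]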
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) <= 1:
return input_list
_lowercase : List[str] = list(SCREAMING_SNAKE_CASE )
# iteration for two-way merging
_lowercase : List[str] = 2
while p <= len(SCREAMING_SNAKE_CASE ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ):
_lowercase : List[Any] = i
_lowercase : Optional[int] = i + p - 1
_lowercase : List[Any] = (low + high + 1) // 2
_lowercase : str = merge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# final merge of last two parts
if p * 2 >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Dict = i
_lowercase : List[str] = merge(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
UpperCamelCase = []
else:
UpperCamelCase = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_lowercase : str = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_lowercase : int = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_lowercase : List[Any] = max(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE ) , b_binary.zfill(SCREAMING_SNAKE_CASE ) ) )
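# Standalone check of the zfill-and-AND idea above: 25 -> '11001' and
# 32 -> '100000' share no set bits, so the expected result is '0b000000'.
a_bits, b_bits = bin(25)[2:], bin(32)[2:]
width = max(len(a_bits), len(b_bits))
anded = ''.join(str(int(x == '1' and y == '1')) for x, y in zip(a_bits.zfill(width), b_bits.zfill(width)))
assert '0b' + anded == '0b000000'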
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
UpperCamelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : int = {}
with open(SCREAMING_SNAKE_CASE , 'r' ) as file:
for line_number, line in enumerate(SCREAMING_SNAKE_CASE ):
_lowercase : Any = line.strip()
if line:
_lowercase : str = line.split()
_lowercase : List[Any] = line_number
_lowercase : str = words[0]
_lowercase : str = value
return result
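# Assumed label-file format for the reader above (inferred from the code, not
# documented here): one label per line, line index -> first whitespace token.
demo_lines = ['down', 'up', 'left']  # hypothetical file contents
demo_id2label = {i: line.split()[0] for i, line in enumerate(demo_lines) if line.strip()}
assert demo_id2label == {0: 'down', 1: 'up', 2: 'left'}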
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
for attribute in key.split('.' ):
_lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
_lowercase : int = 'param'
if weight_type is not None and weight_type != "param":
_lowercase : Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
_lowercase : Optional[Any] = hf_pointer
for attribute in hf_param_name.split('.' ):
_lowercase : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Tuple = shape_pointer.shape
# let's reduce dimension
_lowercase : Any = value[0]
else:
_lowercase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowercase : Tuple = value
elif weight_type == "weight_g":
_lowercase : List[Any] = value
elif weight_type == "weight_v":
_lowercase : Tuple = value
elif weight_type == "bias":
_lowercase : Optional[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
_lowercase : Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = value
else:
_lowercase : Optional[int] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
_lowercase : int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(SCREAMING_SNAKE_CASE ):
_lowercase : Any = PARAM_MAPPING[full_name.split('.' )[-1]]
_lowercase : Tuple = 'param'
if weight_type is not None and weight_type != "param":
_lowercase : str = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowercase : Union[str, Any] = '.'.join([key, hf_param_name] )
else:
_lowercase : str = key
_lowercase : Optional[Any] = value if 'lm_head' in full_key else value[0]
UpperCamelCase = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
_lowercase : List[Any] = False
for key, mapped_key in MAPPING.items():
_lowercase : str = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_lowercase : Tuple = True
if "*" in mapped_key:
_lowercase : Optional[Any] = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
_lowercase : Union[str, Any] = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
_lowercase : int = 'weight_g'
elif "weight_v" in name:
_lowercase : Any = 'weight_v'
elif "bias" in name:
_lowercase : Tuple = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowercase : Dict = 'weight'
else:
_lowercase : Optional[Any] = None
if hf_dict is not None:
rename_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return is_used
return is_used
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : List[Any] = []
_lowercase : List[str] = fairseq_model.state_dict()
_lowercase : Tuple = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowercase : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
_lowercase : List[Any] = True
else:
_lowercase : Optional[int] = load_wavaveca_layer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Optional[int] = full_name.split('conv_layers.' )[-1]
_lowercase : Union[str, Any] = name.split('.' )
_lowercase : str = int(items[0] )
_lowercase : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowercase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowercase : Tuple = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowercase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowercase : str = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False ) -> Dict:
if config_path is not None:
_lowercase : Any = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = WavaVecaConfig()
if is_seq_class:
_lowercase : int = read_txt_into_dict(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = idalabel
_lowercase : List[Any] = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
_lowercase : Tuple = Dictionary.load(SCREAMING_SNAKE_CASE )
            # important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
_lowercase : int = target_dict.pad_index
_lowercase : List[str] = target_dict.bos_index
_lowercase : Dict = target_dict.eos_index
_lowercase : List[str] = len(target_dict.symbols )
_lowercase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
_lowercase : Any = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowercase : Dict = 0
_lowercase : Union[str, Any] = 1
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Any = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
_lowercase : Union[str, Any] = True if config.feat_extract_norm == 'layer' else False
_lowercase : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
_lowercase : str = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = WavaVecaForCTC(SCREAMING_SNAKE_CASE )
else:
_lowercase : Any = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
_lowercase , _lowercase , _lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_lowercase : Optional[Any] = argparse.Namespace(task='audio_pretraining' )
_lowercase : Optional[int] = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE )
_lowercase , _lowercase , _lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE )
_lowercase : List[str] = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
UpperCamelCase = parser.parse_args()
UpperCamelCase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 66 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : int = IFInpaintingSuperResolutionPipeline
_UpperCamelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
_UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {"latents"}
def __a ( self ):
return self._get_superresolution_dummy_components()
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : int = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Union[str, Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __a ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __a ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __a ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ):
self._test_save_load_local()
def __a ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 66 | 1 |
import random
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> dict:
_lowercase : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than the probability
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE )
if not directed:
                    # if the graph is undirected, also add an edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE )
return graph
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict:
return {
i: [j for j in range(SCREAMING_SNAKE_CASE ) if i != j] for i in range(SCREAMING_SNAKE_CASE )
}
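# Deterministic check mirroring the comprehension above for n == 3; for the
# random generator, the expected undirected edge count is
# probability * n * (n - 1) / 2.
demo_n = 3
demo_graph = {i: [j for j in range(demo_n) if i != j] for i in range(demo_n)}
assert demo_graph == {0: [1, 2], 1: [0, 2], 2: [0, 1]}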
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[float, float]:
# Check if the input is valid
if not len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_lowercase , _lowercase , _lowercase : Tuple = equationa
_lowercase , _lowercase , _lowercase : Dict = equationa
# Calculate the determinants of the matrices
_lowercase : str = aa * ba - aa * ba
_lowercase : Any = ca * ba - ca * ba
_lowercase : Optional[int] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution x = y = 0 (consistent system)
return (0.0, 0.0)
else:
_lowercase : Union[str, Any] = determinant_x / determinant
_lowercase : Tuple = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
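# Worked example of the Cramer's rule computation above (hypothetical system
# x + 2y = 3, 2x + y = 3, written as coefficient triples [a, b, c]):
a1, b1, c1 = 1, 2, 3
a2, b2, c2 = 2, 1, 3
det = a1 * b2 - a2 * b1    # -3
det_x = c1 * b2 - c2 * b1  # -3 -> x = 1.0
det_y = a1 * c2 - a2 * c1  # -3 -> y = 1.0
assert (det_x / det, det_y / det) == (1.0, 1.0)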
| 66 | 1 |
from __future__ import annotations
import requests
UpperCamelCase = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = "new" , SCREAMING_SNAKE_CASE = None ) -> dict:
_lowercase : Dict = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(SCREAMING_SNAKE_CASE ) - valid_terms ) ):
_lowercase : List[str] = F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(SCREAMING_SNAKE_CASE )
_lowercase : Dict = requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'User-agent': 'A random string'} , )
if response.status_code == 429:
raise requests.HTTPError
_lowercase : Optional[int] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(SCREAMING_SNAKE_CASE )}
_lowercase : List[Any] = {}
for id_ in range(SCREAMING_SNAKE_CASE ):
_lowercase : List[str] = {
item: data['data']['children'][id_]['data'][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE = 50 ) -> int:
_lowercase : Optional[int] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
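# Worked case for the DP above (Project Euler 116): a row of length 5 admits
# 7 red (length-2), 3 green (length-3) and 2 blue (length-4) tilings, so
# solution(5) == 7 + 3 + 2 == 12. Standalone check of the length-2 recurrence:
ways = [0] * 6
for n in range(6):
    ways[n] = sum(ways[n - s - 2] + 1 for s in range(n - 1))
assert ways[5] == 7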
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 | 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = 50 # max width of layer names
UpperCamelCase = 70 # max width of quantizer names
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
_lowercase : List[str] = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=SCREAMING_SNAKE_CASE , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=SCREAMING_SNAKE_CASE , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=SCREAMING_SNAKE_CASE , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=SCREAMING_SNAKE_CASE , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=SCREAMING_SNAKE_CASE , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=SCREAMING_SNAKE_CASE , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple:
if args.calibrator == "max":
_lowercase : Optional[int] = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
_lowercase : Tuple = 'histogram'
elif args.calibrator == "mse":
_lowercase : int = 'histogram'
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
_lowercase : Union[str, Any] = QuantDescriptor(num_bits=args.aprec , calib_method=SCREAMING_SNAKE_CASE )
_lowercase : int = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(SCREAMING_SNAKE_CASE )
quant_nn.QuantLinear.set_default_quant_desc_weight(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
logger.info('Configuring Model for Quantization' )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , ['embeddings'] , which='weight' , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [''] , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable_keyword:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , args.quant_disable_keyword , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable_layer_module:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_enable_layer_module:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=SCREAMING_SNAKE_CASE )
if args.recalibrate_weights:
recalibrate_weights(SCREAMING_SNAKE_CASE )
if args.fuse_qkv:
fuse_qkv(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if args.clip_gelu:
clip_gelu(SCREAMING_SNAKE_CASE , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
def fusea(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for mod in [qq, qk, qv]:
if not hasattr(SCREAMING_SNAKE_CASE , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
_lowercase : str = qq._amax.detach().item()
_lowercase : int = qk._amax.detach().item()
_lowercase : List[Any] = qv._amax.detach().item()
_lowercase : Optional[int] = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
qq._amax.fill_(SCREAMING_SNAKE_CASE )
qk._amax.fill_(SCREAMING_SNAKE_CASE )
qv._amax.fill_(SCREAMING_SNAKE_CASE )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
_lowercase : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=SCREAMING_SNAKE_CASE )
_lowercase : int = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
_lowercase : int = mod.weight.shape[0]
_lowercase : List[str] = mod._weight_quantizer._amax.detach()
_lowercase : Any = torch.ones(SCREAMING_SNAKE_CASE , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , '_weight_quantizer' ):
if not hasattr(mod.weight_quantizer , '_amax' ):
                print(F"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_lowercase : List[Any] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_lowercase : Union[str, Any] = set(range(len(mod.weight.size() ) ) ) - axis_set
_lowercase : Union[str, Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=SCREAMING_SNAKE_CASE , keepdims=SCREAMING_SNAKE_CASE ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
_lowercase : Any = amax
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=25 , SCREAMING_SNAKE_CASE=180 , SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
if ignore is None:
_lowercase : Optional[Any] = []
elif not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Dict = [ignore]
_lowercase : Tuple = 0
for name, mod in model.named_modules():
if not hasattr(SCREAMING_SNAKE_CASE , 'weight' ):
continue
_lowercase : Tuple = max(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
for name, mod in model.named_modules():
_lowercase : Any = getattr(SCREAMING_SNAKE_CASE , '_input_quantizer' , SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE , '_weight_quantizer' , SCREAMING_SNAKE_CASE )
if not hasattr(SCREAMING_SNAKE_CASE , 'weight' ):
continue
if type(SCREAMING_SNAKE_CASE ) in ignore:
continue
if [True for s in ignore if type(SCREAMING_SNAKE_CASE ) is str and s in name]:
continue
_lowercase : int = F"""Act:{input_q.extra_repr()}"""
_lowercase : Optional[int] = F"""Wgt:{weight_q.extra_repr()}"""
_lowercase : List[str] = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(SCREAMING_SNAKE_CASE ) <= line_width:
logger.info(SCREAMING_SNAKE_CASE )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{' ':{name_width}} {wgt_str}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : int = 0
for name, mod in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
_lowercase : Tuple = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if quantizer_mod is not None:
assert hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="both" , **SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : Optional[Any] = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '_input_quantizer' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if which in ["weight", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '_weight_quantizer' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , '_input_quantizer' ) or hasattr(SCREAMING_SNAKE_CASE , '_weight_quantizer' ):
for n in names:
if re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
set_quantizers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : str = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(SCREAMING_SNAKE_CASE )
| 66 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConvNextFeatureExtractor"]
UpperCamelCase = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = 7 , SCREAMING_SNAKE_CASE = 1_000_000 ) -> int:
_lowercase : Union[str, Any] = 0
_lowercase : Optional[int] = 1
for current_denominator in range(1 , limit + 1 ):
_lowercase : str = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_lowercase : List[Any] = current_numerator
_lowercase : Any = current_denominator
return max_numerator
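# Worked case for the search above (Project Euler 71): with denominators up to
# 8, the fraction immediately left of 3/7 is 2/5, so solution(3, 7, 8) should
# return 2. Standalone re-run of the same scan for that small limit:
best_num, best_den = 0, 1
for den in range(1, 9):
    num = den * 3 // 7 - (1 if den % 7 == 0 else 0)
    if num * best_den > den * best_num:
        best_num, best_den = num, den
assert (best_num, best_den) == (2, 5)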
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
_lowercase : Optional[Any] = 4
_lowercase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
_lowercase : Union[str, Any] = ((s * s) - 2) % m
return s == 0
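# Expected values for the __main__ checks below: p == 7 tests the Mersenne
# number 2**7 - 1 == 127 (prime, so True), while p == 11 tests
# 2**11 - 1 == 2047 == 23 * 89 (composite, so False).
assert 2 ** 11 - 1 == 23 * 89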
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 66 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = FlaxAutoencoderKL
@property
def __a ( self ):
_lowercase : Optional[Any] = 4
_lowercase : List[str] = 3
_lowercase : int = (3_2, 3_2)
_lowercase : List[str] = jax.random.PRNGKey(0 )
_lowercase : Union[str, Any] = jax.random.uniform(_lowerCAmelCase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def __a ( self ):
_lowercase : Union[str, Any] = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
_lowercase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
| 66 |
import random
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> dict:
_lowercase : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than the probability
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE )
if not directed:
                    # if the graph is undirected, also add an edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE )
return graph
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict:
return {
i: [j for j in range(SCREAMING_SNAKE_CASE ) if i != j] for i in range(SCREAMING_SNAKE_CASE )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , *_lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Union[str, Any] = eval_examples
_lowercase : List[Any] = post_process_function
def __a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase = "eval" ):
_lowercase : str = self.eval_dataset if eval_dataset is None else eval_dataset
_lowercase : Union[str, Any] = self.get_eval_dataloader(_lowerCAmelCase )
_lowercase : int = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_lowercase : Union[str, Any] = self.compute_metrics
_lowercase : Any = None
_lowercase : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_lowercase : Tuple = time.time()
try:
_lowercase : Tuple = eval_loop(
_lowerCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
_lowercase : Tuple = compute_metrics
_lowercase : str = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
_lowercase : List[Any] = self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , output.predictions )
_lowercase : Dict = self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
_lowercase : List[Any] = metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
else:
_lowercase : Optional[Any] = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(_lowerCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_lowercase : Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCAmelCase )
return metrics
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase = "test" ):
_lowercase : Any = self.get_test_dataloader(_lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
_lowercase : Tuple = self.compute_metrics
_lowercase : Optional[int] = None
_lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_lowercase : Optional[int] = time.time()
try:
_lowercase : str = eval_loop(
_lowerCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
_lowercase : Dict = compute_metrics
_lowercase : str = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_lowercase : List[str] = self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , output.predictions , 'predict' )
_lowercase : str = self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
_lowercase : Optional[int] = metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCAmelCase )
| 66 |
from __future__ import annotations
UpperCamelCase = tuple[int, int, int]
UpperCamelCase = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCamelCase = "EGZWVONAHDCLFQMSIPJBYUKXTR"
UpperCamelCase = "FOBHMDKEXQNRAULPGSJVTYICZW"
UpperCamelCase = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
UpperCamelCase = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
UpperCamelCase = "RMDJXFUWGISLHVTCQNKYPBEZOA"
UpperCamelCase = "SGLCPQWZHKXAREONTFBVIYJUDM"
UpperCamelCase = "HVSICLTYKQUBXDWAJZOMFGPREN"
UpperCamelCase = "RZWQHFMVDBKICJLNTUXAGYPSOE"
UpperCamelCase = "LFKIJODBEGAMQPXVUHYSTCZRWN"
UpperCamelCase = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(SCREAMING_SNAKE_CASE ) )) < 3:
_lowercase : Optional[int] = F"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
_lowercase , _lowercase , _lowercase : int = rotpos
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
        _lowercase : Dict = F"""First rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : int = F"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : str = F"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
# Validates string and returns dict
_lowercase : Tuple = _plugboard(SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict[str, str]:
    # tests that the input string
    # a) is of type string
    # b) has an even length (so pairs can be made)
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = F"""Plugboard setting isn't type string ({type(SCREAMING_SNAKE_CASE )})"""
raise TypeError(SCREAMING_SNAKE_CASE )
elif len(SCREAMING_SNAKE_CASE ) % 2 != 0:
_lowercase : Optional[int] = F"""Odd number of symbols ({len(SCREAMING_SNAKE_CASE )})"""
raise Exception(SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
pbstring = pbstring.replace(' ' , '' )  # str.replace returns a new string; the result must be kept
# Checks if all characters are unique
_lowercase : Dict = set()
for i in pbstring:
if i not in abc:
_lowercase : str = F"""'{i}' not in list of symbols"""
raise Exception(SCREAMING_SNAKE_CASE )
elif i in tmppbl:
_lowercase : int = F"""Duplicate symbol ({i})"""
raise Exception(SCREAMING_SNAKE_CASE )
else:
tmppbl.add(SCREAMING_SNAKE_CASE )
del tmppbl
# Create the dictionary of letter pairs
_lowercase : Optional[Any] = {}
for j in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 , 2 ):
_lowercase : Dict = pbstring[j + 1]
_lowercase : Union[str, Any] = pbstring[j]
return pb
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (rotora, rotorb, rotorc) , SCREAMING_SNAKE_CASE = "" , ) -> str:
_lowercase : List[str] = text.upper()
_lowercase , _lowercase , _lowercase : List[str] = _validator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , plugb.upper() )
_lowercase , _lowercase , _lowercase : Optional[int] = rotor_position
_lowercase , _lowercase , _lowercase : Union[str, Any] = rotor_selection
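# convert the user-supplied 1-based rotor positions to 0-based indices into the alphabet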
rotorposa -= 1
rotorposb -= 1
rotorposc -= 1
_lowercase : Optional[int] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowercase : Dict = plugboard[symbol]
# rotor ra --------------------------
_lowercase : Optional[Any] = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : Union[str, Any] = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
_lowercase : Tuple = abc.index(SCREAMING_SNAKE_CASE ) + rotorposb
_lowercase : str = rotorb[index % len(SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
_lowercase : List[Any] = abc.index(SCREAMING_SNAKE_CASE ) + rotorposc
_lowercase : List[str] = rotorc[index % len(SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowercase : List[str] = reflector[symbol]
# 2nd pass back through the rotors, in reverse order (rc, rb, ra)
_lowercase : List[str] = abc[rotorc.index(SCREAMING_SNAKE_CASE ) - rotorposc]
_lowercase : Tuple = abc[rotorb.index(SCREAMING_SNAKE_CASE ) - rotorposb]
_lowercase : Dict = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowercase : int = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Any = 0
rotorposb += 1
if rotorposb >= len(SCREAMING_SNAKE_CASE ):
_lowercase : int = 0
rotorposc += 1
if rotorposc >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Any = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = "This is my Python script that emulates the Enigma machine from WWII."
UpperCamelCase = (1, 1, 1)
UpperCamelCase = "pictures"
UpperCamelCase = (rotora, rotorb, rotorc)
UpperCamelCase = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 66 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> YolosConfig:
_lowercase : List[Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_lowercase : List[str] = 192
_lowercase : Optional[int] = 768
_lowercase : int = 12
_lowercase : Union[str, Any] = 3
_lowercase : Dict = [800, 1_333]
_lowercase : Any = False
elif yolos_name == "yolos_s_dWr":
_lowercase : Any = 330
_lowercase : Dict = 14
_lowercase : Optional[Any] = 6
_lowercase : List[str] = 1_320
elif "yolos_s" in yolos_name:
_lowercase : Union[str, Any] = 384
_lowercase : Optional[Any] = 1_536
_lowercase : Tuple = 12
_lowercase : Tuple = 6
elif "yolos_b" in yolos_name:
_lowercase : Optional[int] = [800, 1_344]
_lowercase : int = 91
_lowercase : Union[str, Any] = 'huggingface/label-files'
_lowercase : List[Any] = 'coco-detection-id2label.json'
_lowercase : Optional[int] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
_lowercase : Optional[int] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_lowercase : Union[str, Any] = idalabel
_lowercase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase : str = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowercase : Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase : int = in_proj_weight[: config.hidden_size, :]
_lowercase : Union[str, Any] = in_proj_bias[: config.hidden_size]
_lowercase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase : Tuple = in_proj_weight[-config.hidden_size :, :]
_lowercase : Tuple = in_proj_bias[-config.hidden_size :]
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str:
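# translate parameter names from the original YOLOS checkpoint layout to the Hugging Face module hierarchy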
if "backbone" in name:
_lowercase : Tuple = name.replace('backbone' , 'vit' )
if "cls_token" in name:
_lowercase : Optional[int] = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
_lowercase : List[Any] = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
_lowercase : Optional[int] = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
_lowercase : Optional[int] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_lowercase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
_lowercase : List[str] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
_lowercase : Union[str, Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_lowercase : Any = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_lowercase : Tuple = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowercase : Tuple = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : Any = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
_lowercase : Optional[Any] = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
_lowercase : Dict = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
_lowercase : List[Any] = name.replace('vit.norm' , 'vit.layernorm' )
return name
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> dict:
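# besides key renaming, each fused qkv projection is split below into separate query, key and value slices of size all_head_size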
for key in orig_state_dict.copy().keys():
_lowercase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
_lowercase : List[str] = key.split('.' )
_lowercase : Optional[Any] = int(key_split[2] )
_lowercase : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_lowercase : Union[str, Any] = val[:dim, :]
_lowercase : Optional[int] = val[
dim : dim * 2, :
]
_lowercase : Dict = val[-dim:, :]
else:
_lowercase : Optional[int] = val[:dim]
_lowercase : str = val[dim : dim * 2]
_lowercase : Union[str, Any] = val[-dim:]
else:
_lowercase : Tuple = val
return orig_state_dict
def __magic_name__ ( ) -> torch.Tensor:
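# the standard COCO val2017 image of two cats, commonly used as a sanity check in conversion scripts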
_lowercase : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> Any:
_lowercase : Dict = get_yolos_config(SCREAMING_SNAKE_CASE )
# load original state_dict
_lowercase : Optional[int] = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# load 🤗 model
_lowercase : Optional[int] = YolosForObjectDetection(SCREAMING_SNAKE_CASE )
model.eval()
_lowercase : int = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by YolosImageProcessor
_lowercase : Optional[Any] = 800 if yolos_name != 'yolos_ti' else 512
_lowercase : List[str] = YolosImageProcessor(format='coco_detection' , size=SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='pt' )
_lowercase : List[Any] = model(**SCREAMING_SNAKE_CASE )
_lowercase , _lowercase : Tuple = outputs.logits, outputs.pred_boxes
_lowercase , _lowercase : Union[str, Any] = None, None
if yolos_name == "yolos_ti":
_lowercase : List[Any] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
_lowercase : Tuple = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
_lowercase : List[str] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
_lowercase : Union[str, Any] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
_lowercase : Optional[Any] = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
_lowercase : List[Any] = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
_lowercase : Any = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
_lowercase : Any = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
_lowercase : Optional[int] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
_lowercase : Dict = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
_lowercase : Union[str, Any] = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
_lowercase : str = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE , organization='hustvl' )
model.push_to_hub(SCREAMING_SNAKE_CASE , organization='hustvl' )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 66 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
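# the import structure below is resolved lazily by _LazyModule, so torch/vision are only imported when one of these names is actually accessed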
UpperCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["GLPNFeatureExtractor"]
UpperCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 1 |
from __future__ import annotations
from math import pi, sqrt
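# resonant frequency of an ideal LC circuit: f0 = 1 / (2 * pi * sqrt(L * C))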
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : List[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
_lowercase : List[Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(_lowerCAmelCase )
from datasets import load_dataset
_lowercase : Union[str, Any] = load_dataset('nielsr/rvlcdip-demo' )
_lowercase : Any = dataset['train'][0]['image'].convert('RGB' )
_lowercase : List[str] = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : Dict = model(**_lowerCAmelCase )
_lowercase : Any = outputs.logits
_lowercase : str = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , _lowerCAmelCase )
_lowercase : Union[str, Any] = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=_lowerCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 66 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase = logging.get_logger(__name__)
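# LayoutLM expects bounding boxes normalized to a 0-1000 scale relative to the page size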
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple:
_lowercase : Optional[int] = tesseract_config if tesseract_config is not None else ''
# apply OCR
_lowercase : Any = to_pil_image(SCREAMING_SNAKE_CASE )
_lowercase , _lowercase : Any = pil_image.size
_lowercase : Dict = pytesseract.image_to_data(SCREAMING_SNAKE_CASE , lang=SCREAMING_SNAKE_CASE , output_type='dict' , config=SCREAMING_SNAKE_CASE )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[str] = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
_lowercase : Optional[Any] = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE ) if not word.strip()]
_lowercase : Union[str, Any] = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
_lowercase : Optional[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
_lowercase : int = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
_lowercase : List[str] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
_lowercase : Optional[Any] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_lowercase : Any = []
for x, y, w, h in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(SCREAMING_SNAKE_CASE )
# finally, normalize the bounding boxes
_lowercase : Union[str, Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Union[str, Any] = ["pixel_values"]
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = "" , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
_lowercase : Optional[Any] = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
_lowercase : int = get_size_dict(_lowerCAmelCase )
_lowercase : Dict = do_resize
_lowercase : Any = size
_lowercase : Tuple = resample
_lowercase : Optional[int] = apply_ocr
_lowercase : Optional[int] = ocr_lang
_lowercase : str = tesseract_config
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = None , **_lowerCAmelCase , ):
_lowercase : Any = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : List[Any] = (size['height'], size['width'])
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ):
_lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
_lowercase : Tuple = size if size is not None else self.size
_lowercase : Optional[int] = get_size_dict(_lowerCAmelCase )
_lowercase : Union[str, Any] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowercase : str = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowercase : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowercase : Tuple = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
_lowercase : Union[str, Any] = [to_numpy_array(_lowerCAmelCase ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
_lowercase : Any = []
_lowercase : Any = []
for image in images:
_lowercase , _lowercase : int = apply_tesseract(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
words_batch.append(_lowerCAmelCase )
boxes_batch.append(_lowerCAmelCase )
if do_resize:
_lowercase : List[Any] = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowercase : List[str] = [flip_channel_order(_lowerCAmelCase ) for image in images]
_lowercase : int = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowercase : Optional[Any] = BatchFeature(data={'pixel_values': images} , tensor_type=_lowerCAmelCase )
if apply_ocr:
_lowercase : Optional[Any] = words_batch
_lowercase : Dict = boxes_batch
return data
| 66 |
from PIL import Image
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Image:
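# Image.point applies the mapping below to every pixel value c; 128 + level + (c - 128) reduces to c + level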
def brightness(SCREAMING_SNAKE_CASE ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
UpperCamelCase = change_brightness(img, 100)
brightness_img.save("image_data/lena_brightness.png", format="png")
| 66 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE = 50 ) -> int:
_lowercase : Optional[int] = [[0] * 3 for _ in range(length + 1 )]
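# different_colour_ways_number[n][k] counts the fillings of a row of length n that use at least one tile of length k + 2 (one tile length at a time)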
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[Any] = torch.nn.Linear(1_0 , 1_0 )
_lowercase : Any = torch.optim.SGD(model.parameters() , 0.1 )
_lowercase : str = Accelerator()
_lowercase : Any = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 66 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[int] = "van"
def __init__( self , _lowerCAmelCase=2_2_4 , _lowerCAmelCase=3 , _lowerCAmelCase=[7, 3, 3, 3] , _lowerCAmelCase=[4, 2, 2, 2] , _lowerCAmelCase=[6_4, 1_2_8, 3_2_0, 5_1_2] , _lowerCAmelCase=[3, 3, 1_2, 3] , _lowerCAmelCase=[8, 8, 4, 4] , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-6 , _lowerCAmelCase=1E-2 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
_lowercase : Tuple = image_size
_lowercase : Dict = num_channels
_lowercase : Any = patch_sizes
_lowercase : List[str] = strides
_lowercase : List[Any] = hidden_sizes
_lowercase : Optional[int] = depths
_lowercase : Dict = mlp_ratios
_lowercase : Dict = hidden_act
_lowercase : List[str] = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : Optional[int] = layer_scale_init_value
_lowercase : Tuple = drop_path_rate
_lowercase : Any = dropout_rate
| 66 |
import requests
from bs4 import BeautifulSoup
def __magic_name__ ( SCREAMING_SNAKE_CASE = "AAPL" ) -> str:
_lowercase : str = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
_lowercase : int = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE ).text , 'html.parser' )
_lowercase : List[str] = 'My(6px) Pos(r) smartphone_Mt(6px)'
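# note: this CSS class is tied to Yahoo Finance's current markup and is likely to break when the page layout changes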
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 66 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pix2struct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Pix2StructConfig,
Pix2StructTextConfig,
Pix2StructVisionConfig,
)
from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pix2struct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
Pix2StructForConditionalGeneration,
Pix2StructPreTrainedModel,
Pix2StructTextModel,
Pix2StructVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["PoolFormerFeatureExtractor"]
UpperCamelCase = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 1 |
from __future__ import annotations
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]:
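# collect n together with every left and right truncation, e.g. 3797 -> 797, 97, 7 and 379, 37, 3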
_lowercase : int = str(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = [n]
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
if len(str(SCREAMING_SNAKE_CASE ) ) > 3:
if not is_prime(int(str(SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(SCREAMING_SNAKE_CASE )[:3] ) ):
return False
return True
def __magic_name__ ( SCREAMING_SNAKE_CASE = 11 ) -> list[int]:
_lowercase : list[int] = []
_lowercase : Optional[int] = 13
while len(SCREAMING_SNAKE_CASE ) != count:
if validate(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = list_truncated_nums(SCREAMING_SNAKE_CASE )
if all(is_prime(SCREAMING_SNAKE_CASE ) for i in list_nums ):
list_truncated_primes.append(SCREAMING_SNAKE_CASE )
num += 2
return list_truncated_primes
def __magic_name__ ( ) -> int:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
| 66 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = LayoutLMTokenizer
_UpperCamelCase : Union[str, Any] = LayoutLMTokenizerFast
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = True
def __a ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __a ( self , **_lowerCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : str = 'UNwant\u00E9d,running'
_lowercase : List[Any] = 'unwanted, running'
return input_text, output_text
def __a ( self ):
_lowercase : Dict = self.tokenizer_class(self.vocab_file )
_lowercase : Dict = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [7, 4, 5, 1_0, 8, 9] )
def __a ( self ):
pass
| 66 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Dict = parent
_lowercase : Optional[int] = batch_size
_lowercase : List[str] = seq_length
_lowercase : List[str] = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : Union[str, Any] = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : Tuple = vocab_size
_lowercase : Tuple = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Dict = intermediate_size
_lowercase : List[str] = hidden_act
_lowercase : Any = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : Optional[Any] = type_vocab_size
_lowercase : str = type_sequence_label_size
_lowercase : str = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Union[str, Any] = scope
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : str = None
if self.use_input_mask:
_lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : str = None
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : List[str] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_lowerCAmelCase , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = FalconModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
_lowercase : str = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Union[str, Any] = True
_lowercase : str = FalconModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Any = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
_lowercase : List[Any] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
_lowercase : Tuple = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Union[str, Any] = FalconForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Tuple = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Optional[Any] = True
_lowercase : Dict = True
_lowercase : Optional[int] = FalconForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
_lowercase : Tuple = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
_lowercase : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowercase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowercase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowercase : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
_lowercase : Tuple = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
_lowercase : Any = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
# select random slice
_lowercase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowercase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowercase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def __a ( self ):
_lowercase : Optional[int] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Optional[Any] = (FalconForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : str = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Tuple = False
_UpperCamelCase : str = False
def __a ( self ):
_lowercase : Dict = FalconModelTester(self )
_lowercase : Dict = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase , *_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_lowercase : Union[str, Any] = alibi
self.model_tester.create_and_check_model(_lowerCAmelCase , *_lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[Any] = 3
_lowercase : int = input_dict['input_ids']
_lowercase : List[Any] = input_ids.ne(1 ).to(_lowerCAmelCase )
_lowercase : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowercase : List[str] = FalconForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Tuple = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __a ( self ):
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : List[Any] = 3
_lowercase : Tuple = 'single_label_classification'
_lowercase : Dict = input_dict['input_ids']
_lowercase : Union[str, Any] = input_ids.ne(1 ).to(_lowerCAmelCase )
_lowercase : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowercase : Tuple = FalconForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __a ( self ):
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[Any] = input_dict['input_ids']
_lowercase : Union[str, Any] = FalconForCausalLM(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : List[Any] = model(_lowerCAmelCase , use_cache=_lowerCAmelCase )
_lowercase : int = input_ids.shape[0]
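# round-trip the cache: the standard 4-dim (batch, num_heads, seq_len, head_dim) layout -> the 3-dim RW layout -> back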
_lowercase : str = model._convert_to_rw_cache(result.past_key_values )
_lowercase : List[str] = model._convert_cache_to_standard_format(_lowerCAmelCase , _lowerCAmelCase )
for layer in range(len(_lowerCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __a ( self ):
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : List[str] = 3
_lowercase : List[Any] = 'multi_label_classification'
_lowercase : Union[str, Any] = input_dict['input_ids']
_lowercase : int = input_ids.ne(1 ).to(_lowerCAmelCase )
_lowercase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowercase : Dict = FalconForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __a ( self ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_lowerCAmelCase , 'use_cache' ):
return
_lowercase : Any = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
if "use_cache" not in inputs:
_lowercase : Optional[int] = True
_lowercase : Tuple = model(**_lowerCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_lowercase : str = (
getattr(_lowerCAmelCase , 'decoder_layers' , _lowerCAmelCase )
or getattr(_lowerCAmelCase , 'num_decoder_layers' , _lowerCAmelCase )
or config.num_hidden_layers
)
_lowercase : Tuple = getattr(_lowerCAmelCase , 'num_kv_heads' , config.num_attention_heads )
_lowercase : Any = getattr(_lowerCAmelCase , 'd_model' , config.hidden_size )
_lowercase : List[Any] = embed_dim // num_attention_heads
_lowercase : List[str] = outputs['past_key_values']
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
_lowercase , _lowercase : Optional[Any] = inputs['input_ids'].shape
for i in range(_lowerCAmelCase ):
if config.new_decoder_architecture:
_lowercase : Dict = config.num_attention_heads
elif config.multi_query:
_lowercase : Dict = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : List[str] = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
_lowercase : Any = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(_lowerCAmelCase )
_lowercase : Union[str, Any] = tokenizer('My favorite food is' , return_tensors='pt' ).to(_lowerCAmelCase )
_lowercase : Tuple = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
_lowercase : Optional[Any] = model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=1_9 )
_lowercase : Tuple = tokenizer.batch_decode(_lowerCAmelCase )[0]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __a ( self ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_lowercase : List[str] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
_lowercase : str = FalconForCausalLM.from_pretrained(_lowerCAmelCase )
model.eval()
model.to(_lowerCAmelCase )
_lowercase : str = tokenizer('My favorite food is' , return_tensors='pt' ).to(_lowerCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=4 )
model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=4 )
model.generate(**_lowerCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def __a ( self ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_lowercase : str = AutoTokenizer.from_pretrained(_lowerCAmelCase )
_lowercase : Union[str, Any] = FalconForCausalLM.from_pretrained(_lowerCAmelCase )
model.eval()
model.to(device=_lowerCAmelCase )
_lowercase : Any = tokenizer('My favorite food is' , return_tensors='pt' ).to(_lowerCAmelCase )
# Test results are the same with and without cache
_lowercase : str = model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=2_0 , use_cache=_lowerCAmelCase )
_lowercase : Dict = model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=2_0 , use_cache=_lowerCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 66 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : str = ShapEPipeline
_UpperCamelCase : Any = ["prompt"]
_UpperCamelCase : int = ["prompt"]
_UpperCamelCase : Union[str, Any] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : Optional[Any] = False
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return self.time_input_dim * 4
@property
def __a ( self ):
return 8
@property
def __a ( self ):
_lowercase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_lowerCAmelCase )
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowercase : Optional[Any] = PriorTransformer(**_lowerCAmelCase )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
_lowercase : List[Any] = ShapERenderer(**_lowerCAmelCase )
return model
def __a ( self ):
_lowercase : Optional[Any] = self.dummy_prior
_lowercase : Dict = self.dummy_text_encoder
_lowercase : List[str] = self.dummy_tokenizer
_lowercase : Union[str, Any] = self.dummy_renderer
_lowercase : List[str] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=_lowerCAmelCase , clip_sample=_lowerCAmelCase , clip_sample_range=1.0 , )
_lowercase : List[str] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
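# mps devices do not take a device-bound generator here, so the default generator is seeded globally instead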
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : Optional[Any] = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : List[Any] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
return inputs
def __a ( self ):
_lowercase : Optional[int] = 'cpu'
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Tuple = self.pipeline_class(**_lowerCAmelCase )
_lowercase : Union[str, Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Union[str, Any] = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
_lowercase : str = output.images[0]
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
_lowercase : str = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self ):
_lowercase : List[Any] = torch_device == 'cpu'
_lowercase : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , )
def __a ( self ):
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : Optional[int] = self.pipeline_class(**_lowerCAmelCase )
_lowercase : Any = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : str = 1
_lowercase : Optional[int] = 2
_lowercase : List[str] = self.get_dummy_inputs(_lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
_lowercase : int = batch_size * [inputs[key]]
_lowercase : Optional[int] = pipe(**_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
_lowercase : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
_lowercase : List[str] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Tuple = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowercase : int = pipe(
'a shark' , generator=_lowerCAmelCase , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 66 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_lowercase : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowerCAmelCase , cache_dir=_lowerCAmelCase )
_lowercase : str = [t[-1] for t in os.walk(os.path.join(_lowerCAmelCase , os.listdir(_lowerCAmelCase )[0] , 'snapshots' ) )]
_lowercase : Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase , _lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowerCAmelCase )
_lowercase : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : Optional[Any] = jax.random.PRNGKey(0 )
_lowercase : Optional[Any] = 4
_lowercase : Any = jax.device_count()
_lowercase : Dict = num_samples * [prompt]
_lowercase : List[str] = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
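# replicate the params to every device, give each device its own PRNG key, and reshape the batch to (num_devices, per_device_batch, ...)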
_lowercase : Tuple = replicate(_lowerCAmelCase )
_lowercase : Optional[Any] = jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[str] = shard(_lowerCAmelCase )
_lowercase : List[Any] = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
_lowercase : Any = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowerCAmelCase ) == num_samples
def __a ( self ):
_lowercase , _lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowerCAmelCase )
_lowercase : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : Tuple = jax.random.PRNGKey(0 )
_lowercase : Optional[int] = 5_0
_lowercase : Optional[Any] = jax.device_count()
_lowercase : List[str] = num_samples * [prompt]
_lowercase : Dict = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
_lowercase : str = replicate(_lowerCAmelCase )
_lowercase : Optional[int] = jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = shard(_lowerCAmelCase )
_lowercase : Dict = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def __a ( self ):
_lowercase , _lowercase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowerCAmelCase )
_lowercase : Union[str, Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : Any = jax.random.PRNGKey(0 )
_lowercase : Optional[int] = 5_0
_lowercase : Optional[Any] = jax.device_count()
_lowercase : Union[str, Any] = num_samples * [prompt]
_lowercase : Any = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
_lowercase : Optional[Any] = replicate(_lowerCAmelCase )
_lowercase : Any = jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Dict = shard(_lowerCAmelCase )
_lowercase : Union[str, Any] = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def __a ( self ):
_lowercase , _lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_lowercase : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : Union[str, Any] = jax.random.PRNGKey(0 )
_lowercase : Optional[int] = 5_0
_lowercase : List[str] = jax.device_count()
_lowercase : List[Any] = num_samples * [prompt]
_lowercase : List[str] = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
_lowercase : Union[str, Any] = replicate(_lowerCAmelCase )
_lowercase : List[str] = jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Union[str, Any] = shard(_lowerCAmelCase )
_lowercase : Dict = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def __a ( self ):
_lowercase : str = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , )
_lowercase , _lowercase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
_lowercase : Any = scheduler.create_state()
_lowercase : List[str] = scheduler_state
_lowercase : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : List[str] = jax.random.PRNGKey(0 )
_lowercase : int = 5_0
_lowercase : List[str] = jax.device_count()
_lowercase : Any = num_samples * [prompt]
_lowercase : int = pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
_lowercase : str = replicate(_lowerCAmelCase )
_lowercase : str = jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = shard(_lowerCAmelCase )
_lowercase : Dict = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def __a ( self ):
_lowercase : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowercase : Optional[Any] = jax.device_count()
_lowercase : Union[str, Any] = num_samples * [prompt]
_lowercase : Optional[Any] = jax.random.split(jax.random.PRNGKey(0 ) , _lowerCAmelCase )
_lowercase , _lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowerCAmelCase , )
_lowercase : Optional[Any] = replicate(_lowerCAmelCase )
_lowercase : Union[str, Any] = pipeline.prepare_inputs(_lowerCAmelCase )
_lowercase : Optional[Any] = shard(_lowerCAmelCase )
_lowercase : Tuple = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
_lowercase : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
_lowercase , _lowercase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowerCAmelCase , use_memory_efficient_attention=_lowerCAmelCase , )
_lowercase : Optional[Any] = replicate(_lowerCAmelCase )
_lowercase : str = pipeline.prepare_inputs(_lowerCAmelCase )
_lowercase : int = shard(_lowerCAmelCase )
_lowercase : List[Any] = pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
_lowercase : Dict = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 66 |
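The Flax tests above repeat the same replicate/split/shard dance before every pmapped call: parameters are replicated across devices, one PRNG key is split off per device, and the batch's leading axis is folded into (num_devices, per_device_batch, ...). A standalone sketch of the sharding step (an approximation of flax's shard helper, not the library code itself):

import jax
import numpy as np


def shard(batch):
    # Fold the leading batch axis into (num_devices, per_device_batch, ...).
    n = jax.local_device_count()
    return jax.tree_util.tree_map(lambda x: x.reshape((n, -1) + x.shape[1:]), batch)


rng = jax.random.PRNGKey(0)
rngs = jax.random.split(rng, jax.device_count())  # one PRNG key per device

batch = np.zeros((jax.device_count() * 2, 8), dtype=np.float32)
print(shard(batch).shape)  # (num_devices, 2, 8)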
import sys
UpperCamelCase = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : List[Any] = 1
for digit in s:
product *= int(SCREAMING_SNAKE_CASE )
return product
def __magic_name__ ( SCREAMING_SNAKE_CASE = N ) -> int:
_lowercase : Dict = -sys.maxsize - 1
_lowercase : Tuple = n[:13]
_lowercase : List[Any] = 13
while cur_index < len(SCREAMING_SNAKE_CASE ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
_lowercase : List[str] = substr[1:] + n[cur_index]
cur_index += 1
else:
_lowercase : str = max(SCREAMING_SNAKE_CASE , str_eval(SCREAMING_SNAKE_CASE ) )
_lowercase : Dict = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 | 1 |
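The windowed scan above skips ahead whenever the leading digit of the current substring dominates; as a sanity check, a direct O(n·w) sweep over every 13-digit window gives the same answer. A sketch, independent of the code above:

def brute_force_solution(digits: str, window: int = 13) -> int:
    best = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for ch in digits[i : i + window]:
            product *= int(ch)
        best = max(best, product)
    return best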
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( __snake_case ):
def __a ( self ):
_lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , 'width_multiplier' ) )
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=6_4 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase="swish" , _lowerCAmelCase=3 , _lowerCAmelCase=3_2 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=1_0 , _lowerCAmelCase=None , _lowerCAmelCase=0.25 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , ):
_lowercase : List[Any] = parent
_lowercase : str = batch_size
_lowercase : Tuple = image_size
_lowercase : int = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : int = make_divisible(5_1_2 * width_multiplier , divisor=8 )
_lowercase : Optional[Any] = hidden_act
_lowercase : Any = conv_kernel_size
_lowercase : List[str] = output_stride
_lowercase : Tuple = classifier_dropout_prob
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[int] = is_training
_lowercase : Optional[int] = num_labels
_lowercase : Optional[Any] = initializer_range
_lowercase : Any = scope
_lowercase : List[str] = width_multiplier
_lowercase : int = ffn_dropout
_lowercase : List[Any] = attn_dropout
def __a ( self ):
_lowercase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Union[str, Any] = None
_lowercase : int = None
if self.use_labels:
_lowercase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
_lowercase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = MobileViTVaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : str = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : List[Any] = MobileViTVaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Optional[int] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = self.num_labels
_lowercase : Optional[int] = MobileViTVaForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : List[Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowercase : Optional[int] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __a ( self ):
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : List[str] = config_and_inputs
_lowercase : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Any = False
def __a ( self ):
_lowercase : Optional[int] = MobileViTVaModelTester(self )
_lowercase : Optional[int] = MobileViTVaConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def __a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def __a ( self ):
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def __a ( self ):
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def __a ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def __a ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __a ( self ):
pass
def __a ( self ):
_lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Any = model_class(_lowerCAmelCase )
_lowercase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
_lowercase : List[str] = [*signature.parameters.keys()]
_lowercase : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def __a ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : int = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase : Any = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowercase : Union[str, Any] = outputs.hidden_states
_lowercase : Optional[Any] = 5
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowercase : Optional[Any] = 2
for i in range(len(_lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
_lowercase : Union[str, Any] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Any = MobileViTVaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def __magic_name__ ( ) -> List[str]:
_lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def __a ( self ):
_lowercase : Any = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
_lowerCAmelCase )
_lowercase : Dict = self.default_image_processor
_lowercase : Dict = prepare_img()
_lowercase : Dict = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : Dict = model(**_lowerCAmelCase )
# verify the logits
_lowercase : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowercase : int = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __a ( self ):
_lowercase : str = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowercase : Optional[Any] = model.to(_lowerCAmelCase )
_lowercase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowercase : List[Any] = prepare_img()
_lowercase : List[Any] = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : str = model(**_lowerCAmelCase )
_lowercase : Dict = outputs.logits
# verify the logits
_lowercase : Dict = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , _lowerCAmelCase )
_lowercase : str = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __a ( self ):
_lowercase : int = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowercase : Optional[Any] = model.to(_lowerCAmelCase )
_lowercase : Tuple = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
_lowercase : str = prepare_img()
_lowercase : Dict = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : int = model(**_lowerCAmelCase )
_lowercase : int = outputs.logits.detach().cpu()
_lowercase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(5_0, 6_0)] )
_lowercase : Optional[Any] = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
_lowercase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase )
_lowercase : List[str] = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
| 66 |
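The model tester above derives its hidden size with make_divisible, which rounds a channel count to a hardware-friendly multiple. A common formulation of that helper (a sketch in the MobileNet style; the exact transformers implementation may differ in detail):

def make_divisible(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Guard against rounding down by more than 10%.
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value


print(make_divisible(512 * 0.25))  # 128, matching the width_multiplier=0.25 default above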
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 1 |
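The GPT-Neo init above defers heavy imports through _LazyModule, so importing the package stays cheap and optional backends are only checked when a symbol is actually touched. The same effect can be approximated in any package with a module-level __getattr__ (PEP 562); a sketch of the mechanism, not the _LazyModule implementation itself:

import importlib

_import_structure = {"json": ["dumps", "loads"], "math": ["sqrt"]}


def __getattr__(name):
    # Resolve the requested symbol lazily on first attribute access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")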
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : str = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
_lowercase : Tuple = {
'input_ids': tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
_lowercase : Optional[int] = model(_lowerCAmelCase )['last_hidden_state']
_lowercase : Union[str, Any] = tf.TensorShape((1, 6, 7_6_8) )
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice.
_lowercase : List[Any] = tf.convert_to_tensor(
[
[
[0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
[-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
[-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 66 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : str = ["image_processor", "tokenizer"]
_UpperCamelCase : Union[str, Any] = "AutoImageProcessor"
_UpperCamelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Union[str, Any] = self.image_processor
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be None.' )
if text is not None:
_lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if images is not None:
_lowercase : Union[str, Any] = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and images is not None:
_lowercase : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
return ["input_ids", "attention_mask", "pixel_values"]
| 66 | 1 |
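The processor above routes text through the tokenizer and images through the image processor, then grafts pixel_values onto the text encoding when both are given. The merge step in isolation (a sketch with plain dicts standing in for the real feature objects):

def merge_features(text_features: dict, image_features: dict) -> dict:
    # Keep input_ids/attention_mask from the text side, add pixel_values.
    encoding = dict(text_features)
    encoding["pixel_values"] = image_features["pixel_values"]
    return encoding


merged = merge_features(
    {"input_ids": [[0, 5, 2]], "attention_mask": [[1, 1, 1]]},
    {"pixel_values": [[[0.0, 0.0], [0.0, 0.0]]]},
)
print(sorted(merged))  # ['attention_mask', 'input_ids', 'pixel_values']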
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
UpperCamelCase = "http://www.mocksite.com/file1.txt"
UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class lowerCAmelCase_ :
_UpperCamelCase : int = 200
_UpperCamelCase : Tuple = {"Content-Length": "100"}
_UpperCamelCase : int = {}
def __a ( self , **_lowerCAmelCase ):
return [bytes(_lowerCAmelCase , 'utf-8' )]
def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
import requests
monkeypatch.setattr(SCREAMING_SNAKE_CASE , 'request' , SCREAMING_SNAKE_CASE )
_lowercase : int = URL
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Any = url
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : str = [url]
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : List[str] = {'train': url}
_lowercase : Optional[int] = 'dummy'
_lowercase : Any = 'downloads'
_lowercase : List[str] = tmp_path
_lowercase : str = DownloadConfig(
cache_dir=os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , use_etag=SCREAMING_SNAKE_CASE , )
_lowercase : Tuple = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE , download_config=SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = dl_manager.download(SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = [downloaded_paths]
_lowercase : List[Any] = [urls]
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert "train" in downloaded_paths.keys()
_lowercase : Dict = downloaded_paths.values()
_lowercase : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_lowercase : Any = Path(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_lowercase : List[str] = downloaded_path.read_text()
assert content == CONTENT
_lowercase : Optional[int] = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_lowercase : List[str] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
_lowercase : Optional[Any] = str(SCREAMING_SNAKE_CASE )
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : int = filename
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : str = [filename]
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : int = {'train': filename}
_lowercase : Dict = 'dummy'
_lowercase : List[str] = xz_file.parent
_lowercase : Optional[Any] = 'extracted'
_lowercase : str = DownloadConfig(
cache_dir=SCREAMING_SNAKE_CASE , use_etag=SCREAMING_SNAKE_CASE , )
_lowercase : int = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE , download_config=SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = dl_manager.extract(SCREAMING_SNAKE_CASE )
_lowercase : Dict = paths
for extracted_paths in [extracted_paths]:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = [extracted_paths]
_lowercase : int = [paths]
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert "train" in extracted_paths.keys()
_lowercase : Any = extracted_paths.values()
_lowercase : Any = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_lowercase : int = Path(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE , etag=SCREAMING_SNAKE_CASE )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_lowercase : List[Any] = extracted_path.read_text()
_lowercase : Optional[Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(SCREAMING_SNAKE_CASE , start=1 ):
_lowercase : List[str] = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : str = request.getfixturevalue(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Optional[int] = request.getfixturevalue(SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert num_tar == 1
assert num_jsonl == 2
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Dict = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(SCREAMING_SNAKE_CASE ) , start=1 ):
assert os.path.basename(SCREAMING_SNAKE_CASE ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 66 |
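The download tests above assert that a cached file's name equals a fixed sha256 digest of its URL. The naming scheme boils down to hashing the URL, optionally extended with a hash of the ETag; a sketch of that idea, not necessarily byte-for-byte the datasets implementation:

import hashlib


def url_to_cache_filename(url, etag=None):
    # sha256 of the URL, with an optional ETag-hash suffix for versioning.
    filename = hashlib.sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + hashlib.sha256(etag.encode("utf-8")).hexdigest()
    return filename


print(url_to_cache_filename("http://www.mocksite.com/file1.txt"))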
from __future__ import annotations
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]:
if num <= 0:
_lowercase : List[str] = F"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = [True] * (num + 1)
_lowercase : Union[str, Any] = []
_lowercase : Dict = 2
_lowercase : Union[str, Any] = int(math.sqrt(SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE )
# Set multiples of start to False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
_lowercase : str = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 66 | 1 |
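A quick way to validate a sieve like the one above is trial division over the same range; the sketch below assumes prime_sieve behaves as intended and returns the primes in ascending order:

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))


assert prime_sieve(50) == [p for p in range(2, 51) if is_prime(p)]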
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(SCREAMING_SNAKE_CASE , ['torch'] )
def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(SCREAMING_SNAKE_CASE , ['torch'] )
def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(SCREAMING_SNAKE_CASE , ['torch'] )
def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
requires_backends(SCREAMING_SNAKE_CASE , ['torch'] )
def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(SCREAMING_SNAKE_CASE , ['torch'] )
def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
requires_backends(SCREAMING_SNAKE_CASE , ['torch'] )
def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(SCREAMING_SNAKE_CASE , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch'] )
| 66 |
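Every class above is a placeholder that raises a helpful error the moment it is constructed or any of its attributes is touched without torch installed. The mechanism reduces to a metaclass plus an availability check; a simplified sketch (not the diffusers implementation):

import importlib.util


def requires_backends(obj, backends):
    name = getattr(obj, "__name__", type(obj).__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires backends that are not installed: {missing}")


class DummyObject(type):
    # Any class-level attribute access on the dummy triggers the backend check.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


class FakeTorchModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])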
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : int = 384
if "tiny" in model_name:
_lowercase : Tuple = [3, 3, 9, 3]
_lowercase : List[str] = [96, 192, 384, 768]
if "small" in model_name:
_lowercase : List[str] = [3, 3, 27, 3]
_lowercase : Union[str, Any] = [96, 192, 384, 768]
if "base" in model_name:
_lowercase : List[Any] = [3, 3, 27, 3]
_lowercase : Dict = [128, 256, 512, 1_024]
_lowercase : Optional[int] = 512
if "large" in model_name:
_lowercase : List[str] = [3, 3, 27, 3]
_lowercase : List[Any] = [192, 384, 768, 1_536]
_lowercase : Tuple = 768
if "xlarge" in model_name:
_lowercase : str = [3, 3, 27, 3]
_lowercase : List[str] = [256, 512, 1_024, 2_048]
_lowercase : Tuple = 1_024
# set label information
_lowercase : Dict = 150
_lowercase : Union[str, Any] = 'huggingface/label-files'
_lowercase : str = 'ade20k-id2label.json'
_lowercase : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
_lowercase : Dict = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_lowercase : Tuple = {v: k for k, v in idalabel.items()}
_lowercase : List[str] = ConvNextConfig(
depths=SCREAMING_SNAKE_CASE , hidden_sizes=SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
_lowercase : Union[str, Any] = UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE , auxiliary_in_channels=SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , )
return config
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Any = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Any = dct.pop(SCREAMING_SNAKE_CASE )
_lowercase : Any = val
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : List[Any] = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
_lowercase : Optional[int] = model_name_to_url[model_name]
_lowercase : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['state_dict']
_lowercase : Optional[int] = get_upernet_config(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowercase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE )
if "bn" in key:
_lowercase : Any = key.replace('bn' , 'batch_norm' )
_lowercase : Any = val
# rename keys
_lowercase : int = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify on image
_lowercase : Union[str, Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_lowercase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
_lowercase : Tuple = SegformerImageProcessor()
_lowercase : Tuple = processor(SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
_lowercase : Dict = model(SCREAMING_SNAKE_CASE )
if model_name == "upernet-convnext-tiny":
_lowercase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowercase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowercase : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowercase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
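# A minimal usage sketch (comment only; the paths are illustrative and assume
# the mmsegmentation checkpoint URLs above are reachable):
#
#   python convert_upernet_checkpoint.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny
#
# This downloads the original weights, remaps the keys, verifies the logits on
# the ADE20k sample image, and saves the model and processor in HF format.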
| 66 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCamelCase = TypeVar("T")
UpperCamelCase = Union[List[T], Tuple[T, ...]]
UpperCamelCase = Union[T, List[T], Dict[str, T]]
UpperCamelCase = Union[str, bytes, os.PathLike]
| 66 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = "upernet"
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=[1, 2, 3, 6] , _lowerCAmelCase=True , _lowerCAmelCase=0.4 , _lowerCAmelCase=3_8_4 , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=2_5_5 , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowercase : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[Any] = backbone_config.get('model_type' )
_lowercase : str = CONFIG_MAPPING[backbone_model_type]
_lowercase : Tuple = config_class.from_dict(_lowerCAmelCase )
_lowercase : Optional[Any] = backbone_config
_lowercase : Any = hidden_size
_lowercase : Any = initializer_range
_lowercase : Tuple = pool_scales
_lowercase : List[Any] = use_auxiliary_head
_lowercase : Optional[Any] = auxiliary_loss_weight
_lowercase : Any = auxiliary_in_channels
_lowercase : Any = auxiliary_channels
_lowercase : List[str] = auxiliary_num_convs
_lowercase : List[str] = auxiliary_concat_input
_lowercase : Tuple = loss_ignore_index
def __a ( self ):
_lowercase : str = copy.deepcopy(self.__dict__ )
_lowercase : Tuple = self.backbone_config.to_dict()
_lowercase : int = self.__class__.model_type
return output
| 66 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE = 1_000_000 ) -> int:
_lowercase : str = set(range(3 , SCREAMING_SNAKE_CASE , 2 ) )
primes.add(2 )
for p in range(3 , SCREAMING_SNAKE_CASE , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
_lowercase : Tuple = [float(SCREAMING_SNAKE_CASE ) for n in range(limit + 1 )]
for p in primes:
for n in range(SCREAMING_SNAKE_CASE , limit + 1 , SCREAMING_SNAKE_CASE ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
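# A small worked check (not in the original file): for limit=10 the totients
# phi(2..10) are 1, 2, 2, 4, 2, 6, 4, 6, 4, which sum to 31 — the number of
# reduced proper fractions with denominator <= 10 — so the sieve above should
# report the same value:
print(solution(10)) # expected: 31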
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_lowercase : str = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_lowercase : int = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_lowercase : List[Any] = max(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE ) , b_binary.zfill(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
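# Usage sketch under a hypothetical alias binary_and for the function above
# (the expected outputs are illustrative):
#
#   binary_and(25, 32)  # '0b000000' — 011001 & 100000 share no set bits
#   binary_and(25, 37)  # '0b000001' — only bit 0 is set in both operands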
| 66 | 1 |
from __future__ import annotations
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
UpperCamelCase = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
_lowercase : int = []
for num in range(len(SCREAMING_SNAKE_CASE ) ):
_lowercase : int = 0
while 2 * i * i <= odd_composites[num]:
_lowercase : Union[str, Any] = odd_composites[num] - 2 * i * i
if is_prime(SCREAMING_SNAKE_CASE ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(SCREAMING_SNAKE_CASE ) == n:
return list_nums
return []
def __magic_name__ ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
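# Usage note: compute_nums(1) scans the precomputed odd composites for the
# first one that cannot be written as prime + 2 * square, the counterexample
# to Goldbach's other conjecture; for Project Euler 46 the expected output of
# solution() is 5777.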
| 66 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : int = IFInpaintingSuperResolutionPipeline
_UpperCamelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
_UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {"latents"}
def __a ( self ):
return self._get_superresolution_dummy_components()
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : int = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Union[str, Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __a ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __a ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __a ( self ):
# Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ):
self._test_save_load_local()
def __a ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 66 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Any = DiTPipeline
_UpperCamelCase : int = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __a ( self ):
torch.manual_seed(0 )
_lowercase : str = TransformeraDModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=_lowerCAmelCase , )
_lowercase : int = AutoencoderKL()
_lowercase : List[str] = DDIMScheduler()
_lowercase : Optional[int] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : List[str] = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Any = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : Union[str, Any] = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __a ( self ):
_lowercase : Dict = 'cpu'
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Union[str, Any] = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : Dict = pipe(**_lowerCAmelCase ).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
_lowercase : Optional[int] = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
_lowercase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __a ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __a ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : List[Any] = torch.manual_seed(0 )
_lowercase : Optional[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_lowercase : List[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
_lowercase : List[str] = pipe.get_label_ids(_lowerCAmelCase )
_lowercase : Tuple = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=4_0 , output_type='np' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __a ( self ):
_lowercase : str = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_lowercase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_lowercase : Optional[Any] = ['vase', 'umbrella']
_lowercase : List[Any] = pipe.get_label_ids(_lowerCAmelCase )
_lowercase : List[str] = torch.manual_seed(0 )
_lowercase : List[Any] = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=2_5 , output_type='np' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[float, float]:
# Check if the input is valid
if not len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_lowercase , _lowercase , _lowercase : Tuple = equationa
_lowercase , _lowercase , _lowercase : Dict = equationa
# Calculate the determinants of the matrices
_lowercase : str = aa * ba - aa * ba
_lowercase : Any = ca * ba - ca * ba
_lowercase : Optional[int] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution x = y = 0 (the system is consistent)
return (0.0, 0.0)
else:
_lowercase : Union[str, Any] = determinant_x / determinant
_lowercase : Tuple = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
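# A sketch of the intended behaviour under a hypothetical alias
# cramers_rule_2x2 for the solver above, with each equation given as an
# (a, b, c) coefficient triple for a*x + b*y = c:
#
#   cramers_rule_2x2((2, 3, 0), (5, 1, 0))   # (0.0, 0.0), the trivial solution
#   cramers_rule_2x2((2, 1, 5), (1, -1, 1))  # (2.0, 1.0)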
| 66 | 1 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
_lowercase : str = str(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE ) == 9 and set(SCREAMING_SNAKE_CASE ) == set('123456789' )
def __magic_name__ ( ) -> int | None:
for base_num in range(9_999 , 4_999 , -1 ):
_lowercase : Optional[Any] = 100_002 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE ):
return candidate
for base_num in range(333 , 99 , -1 ):
_lowercase : List[str] = 1_002_003 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
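# Usage note: the search checks concatenated products — a 4-digit base paired
# with (1, 2) gives base * 100_002, and a 3-digit base paired with (1, 2, 3)
# gives base * 1_002_003 — returning the largest 1-9 pandigital hit.
print(is_9_pandigital(192_384_576)) # True: 192, 384, 576 = 192 * (1, 2, 3)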
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE = 50 ) -> int:
_lowercase : Optional[int] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
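# Usage note: different_colour_ways_number[r][t - 2] counts the non-empty ways
# to tile a row of length r using only tiles of length t (t in {2, 3, 4}); the
# recurrence places a tile at each feasible start and recurses on the prefix.
# solution() sums the three single-colour counts for the full row, matching
# the Project Euler 116 setup of red (2), green (3) and blue (4) tiles.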
| 66 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=1_8 , _lowerCAmelCase=3_0 , _lowerCAmelCase=4_0_0 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , ):
_lowercase : int = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = num_channels
_lowercase : Optional[Any] = image_size
_lowercase : List[str] = min_resolution
_lowercase : Optional[Any] = max_resolution
_lowercase : int = do_resize
_lowercase : Tuple = size if size is not None else {'height': 1_8, 'width': 2_0}
_lowercase : List[Any] = do_thumbnail
_lowercase : int = do_align_axis
_lowercase : Any = do_pad
_lowercase : int = do_normalize
_lowercase : List[Any] = image_mean
_lowercase : List[str] = image_std
def __a ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = DonutImageProcessor if is_vision_available() else None
def __a ( self ):
_lowercase : int = DonutImageProcessingTester(self )
@property
def __a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ):
_lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_thumbnail' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_pad' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
def __a ( self ):
_lowercase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 2_0} )
_lowercase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
# Previous config had dimensions in (width, height) order
_lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4) )
self.assertEqual(image_processor.size , {'height': 8_4, 'width': 4_2} )
def __a ( self ):
pass
@is_flaky()
def __a ( self ):
# Initialize image_processing
_lowercase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
_lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_lowercase : List[Any] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self ):
# Initialize image_processing
_lowercase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
_lowercase : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_lowercase : Any = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __a ( self ):
# Initialize image_processing
_lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowercase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_lowercase : List[Any] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 66 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConvNextFeatureExtractor"]
UpperCamelCase = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCamelCase = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCamelCase = "▁"
# Segments (not really needed)
UpperCamelCase = 0
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
UpperCamelCase = 4
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = "left"
_UpperCamelCase : int = XLNetTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , **_lowerCAmelCase , ):
# Mask token behaves like a normal word, i.e. it includes the space before it
_lowercase : Dict = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
vocab_file=_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : str = 3
_lowercase : Dict = do_lower_case
_lowercase : int = remove_space
_lowercase : Any = keep_accents
_lowercase : Any = vocab_file
_lowercase : Any = False if not self.vocab_file else True
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Dict = [self.sep_token_id]
_lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Union[str, Any] = [self.sep_token_id]
_lowercase : List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase : List[str] = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,)
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
_lowercase : Optional[Any] = 4
_lowercase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
_lowercase : Union[str, Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
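# The test decides primality of the Mersenne number 2**p - 1 by iterating
# s -> (s * s - 2) mod (2**p - 1) from s = 4 for p - 2 steps; the number is
# prime iff the final s is 0. The calls above give True for p=7 (127 is prime)
# and False for p=11 (2047 = 23 * 89). One more sanity check:
print(lucas_lehmer_test(13)) # True: 2**13 - 1 = 8191 is prime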
| 66 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
UpperCamelCase = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
UpperCamelCase = json.load(f)
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self , _lowerCAmelCase ):
return FSMTTokenizer.from_pretrained(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : Tuple = FSMTForConditionalGeneration.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
# note: this test does not measure peak performance since it only evaluates a small batch,
# but it should be enough to detect a regression in the output quality
_lowercase : Optional[Any] = F"""facebook/wmt19-{pair}"""
_lowercase : Any = self.get_tokenizer(_lowerCAmelCase )
_lowercase : Union[str, Any] = self.get_model(_lowerCAmelCase )
_lowercase : Any = bleu_data[pair]['src']
_lowercase : Tuple = bleu_data[pair]['tgt']
_lowercase : Tuple = tokenizer(_lowerCAmelCase , return_tensors='pt' , truncation=_lowerCAmelCase , padding='longest' ).to(_lowerCAmelCase )
_lowercase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
_lowercase : Union[str, Any] = tokenizer.batch_decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowercase : Optional[Any] = calculate_bleu(_lowerCAmelCase , _lowerCAmelCase )
print(_lowerCAmelCase )
self.assertGreaterEqual(scores['bleu'] , _lowerCAmelCase )
| 66 |
import random
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> dict:
_lowercase : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE )
# if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is lower than the given probability
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE )
return graph
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict:
return {
i: [j for j in range(SCREAMING_SNAKE_CASE ) if i != j] for i in range(SCREAMING_SNAKE_CASE )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
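# A minimal usage sketch; random_graph is a hypothetical readable alias for
# the generator above, while complete_graph keeps the name used at its call
# site inside it:
#
#   random.seed(1)
#   graph = random_graph(4, 0.5)          # undirected adjacency lists
#   digraph = random_graph(4, 0.5, True)  # directed variant
print(complete_graph(3)) # {0: [1, 2], 1: [0, 2], 2: [0, 1]}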
| 66 | 1 |
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : int = name
_lowercase : Union[str, Any] = value
_lowercase : Tuple = weight
def __repr__( self ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __a ( self ):
return self.value
def __a ( self ):
return self.name
def __a ( self ):
return self.weight
def __a ( self ):
return self.value / self.weight
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Union[str, Any] = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
_lowercase : List[Any] = sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE )
_lowercase : Dict = []
_lowercase , _lowercase : str = 0.0, 0.0
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __magic_name__ ( ) -> List[str]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
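# A sketch of the intended flow, using the Things class named above and
# hypothetical aliases build_menu / greedy for the two helper functions:
#
#   food = ["Burger", "Pizza", "Coca Cola"]
#   menu = build_menu(food, [80, 100, 60], [40, 60, 20])
#   chosen, total = greedy(menu, 100, Things.get_value)  # highest value first
#   # -> Pizza then Burger fit the weight budget of 100, total value 180.0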
| 66 |
from __future__ import annotations
UpperCamelCase = tuple[int, int, int]
UpperCamelCase = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCamelCase = "EGZWVONAHDCLFQMSIPJBYUKXTR"
UpperCamelCase = "FOBHMDKEXQNRAULPGSJVTYICZW"
UpperCamelCase = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
UpperCamelCase = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
UpperCamelCase = "RMDJXFUWGISLHVTCQNKYPBEZOA"
UpperCamelCase = "SGLCPQWZHKXAREONTFBVIYJUDM"
UpperCamelCase = "HVSICLTYKQUBXDWAJZOMFGPREN"
UpperCamelCase = "RZWQHFMVDBKICJLNTUXAGYPSOE"
UpperCamelCase = "LFKIJODBEGAMQPXVUHYSTCZRWN"
UpperCamelCase = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(SCREAMING_SNAKE_CASE ) )) < 3:
_lowercase : Optional[int] = F"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
_lowercase , _lowercase , _lowercase : int = rotpos
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : Dict = F"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : int = F"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
_lowercase : str = F"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(SCREAMING_SNAKE_CASE )
# Validates string and returns dict
_lowercase : Tuple = _plugboard(SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict[str, str]:
# checks that the input string
# a) is of type string and
# b) has an even length (so pairs can be made)
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = F"""Plugboard setting isn't type string ({type(SCREAMING_SNAKE_CASE )})"""
raise TypeError(SCREAMING_SNAKE_CASE )
elif len(SCREAMING_SNAKE_CASE ) % 2 != 0:
_lowercase : Optional[int] = F"""Odd number of symbols ({len(SCREAMING_SNAKE_CASE )})"""
raise Exception(SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
pbstring = pbstring.replace(' ' , '' ) # str.replace returns a new string; keep the result
# Checks if all characters are unique
_lowercase : Dict = set()
for i in pbstring:
if i not in abc:
_lowercase : str = F"""'{i}' not in list of symbols"""
raise Exception(SCREAMING_SNAKE_CASE )
elif i in tmppbl:
_lowercase : int = F"""Duplicate symbol ({i})"""
raise Exception(SCREAMING_SNAKE_CASE )
else:
tmppbl.add(SCREAMING_SNAKE_CASE )
del tmppbl
# Creates the plugboard dictionary
_lowercase : Optional[Any] = {}
for j in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 , 2 ):
_lowercase : Dict = pbstring[j + 1]
_lowercase : Union[str, Any] = pbstring[j]
return pb
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , SCREAMING_SNAKE_CASE = "" , ) -> str:
_lowercase : List[str] = text.upper()
_lowercase , _lowercase , _lowercase : List[str] = _validator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , plugb.upper() )
_lowercase , _lowercase , _lowercase : Optional[int] = rotor_position
_lowercase , _lowercase , _lowercase : Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_lowercase : Optional[int] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowercase : Dict = plugboard[symbol]
# rotor ra --------------------------
_lowercase : Optional[Any] = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : Union[str, Any] = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
_lowercase : Tuple = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : str = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
_lowercase : List[Any] = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
_lowercase : List[str] = rotora[index % len(SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowercase : List[str] = reflector[symbol]
# 2nd rotors
_lowercase : List[str] = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
_lowercase : Tuple = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
_lowercase : Dict = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowercase : int = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Any = 0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : int = 0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
_lowercase : Any = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = "This is my Python script that emulates the Enigma machine from WWII."
UpperCamelCase = (1, 1, 1)
UpperCamelCase = "pictures"
UpperCamelCase = (rotora, rotora, rotora)
UpperCamelCase = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 66 | 1 |
import importlib
import os
import sys
# This is required to make the module imports work (when the python process is running from the root of the repo)
sys.path.append(".")
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
_lowercase : str = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F"""{test_file} instead.""" )
_lowercase : int = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
_lowercase : int = components[:-1] + [test_fn.replace('.py' , '' )]
_lowercase : Union[str, Any] = '.'.join(SCREAMING_SNAKE_CASE )
return test_module_path
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Tuple = get_module_path(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = importlib.import_module(SCREAMING_SNAKE_CASE )
return test_module
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : Union[str, Any] = []
_lowercase : Tuple = get_test_module(SCREAMING_SNAKE_CASE )
for attr in dir(SCREAMING_SNAKE_CASE ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple:
_lowercase : str = []
_lowercase : Dict = get_test_module(SCREAMING_SNAKE_CASE )
for attr in dir(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE , 'all_model_classes' , [] )
if len(SCREAMING_SNAKE_CASE ) > 0:
test_classes.append(SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Union[str, Any] = get_test_classes(SCREAMING_SNAKE_CASE )
_lowercase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : List[Any] = test_class()
if hasattr(SCREAMING_SNAKE_CASE , 'setUp' ):
test.setUp()
_lowercase : int = None
if hasattr(SCREAMING_SNAKE_CASE , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_lowercase : List[Any] = test.model_tester.__class__
return model_tester
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
_lowercase : List[str] = get_test_classes(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : Optional[Any] = get_test_classes_for_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = []
for test_class in test_classes:
_lowercase : List[str] = get_model_tester_from_test_class(SCREAMING_SNAKE_CASE )
if tester_class is not None:
tester_classes.append(SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x.__name__ )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Dict = get_test_classes(SCREAMING_SNAKE_CASE )
_lowercase : int = {test_class: get_model_tester_from_test_class(SCREAMING_SNAKE_CASE ) for test_class in test_classes}
return test_tester_mapping
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : List[Any] = get_model_classes(SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = {
model_class: get_test_classes_for_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_test_mapping
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str:
_lowercase : int = get_model_classes(SCREAMING_SNAKE_CASE )
_lowercase : Any = {
model_class: get_tester_classes_for_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_to_tester_mapping
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return o
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return o.__name__
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
return [to_json(SCREAMING_SNAKE_CASE ) for x in o]
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return {to_json(SCREAMING_SNAKE_CASE ): to_json(SCREAMING_SNAKE_CASE ) for k, v in o.items()}
else:
return o
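# Usage sketch (comment only; the path is illustrative and assumes this module
# is run from the repository root, as the sys.path tweak above requires; the
# helper names follow the call sites visible in the bodies above):
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   test_classes = get_test_classes(test_file)
#   print(to_json(test_classes))  # classes serialized via their __name__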
| 66 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["GLPNFeatureExtractor"]
UpperCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase_ :
_UpperCamelCase : Dict = None
def __a ( self ):
_lowercase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_lowercase : Dict = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , _lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : Tuple = os.path.join(_lowerCAmelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(_lowerCAmelCase )
_lowercase : str = self.feature_extraction_class.from_json_file(_lowerCAmelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __a ( self ):
_lowercase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : int = feat_extract_first.save_pretrained(_lowerCAmelCase )[0]
check_json_file_has_correct_format(_lowerCAmelCase )
_lowercase : int = self.feature_extraction_class.from_pretrained(_lowerCAmelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __a ( self ):
_lowercase : Any = self.feature_extraction_class()
self.assertIsNotNone(_lowerCAmelCase )
| 66 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : List[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
_lowercase : List[Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(_lowerCAmelCase )
from datasets import load_dataset
_lowercase : Union[str, Any] = load_dataset('nielsr/rvlcdip-demo' )
_lowercase : Any = dataset['train'][0]['image'].convert('RGB' )
_lowercase : List[str] = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : Dict = model(**_lowerCAmelCase )
_lowercase : Any = outputs.logits
_lowercase : str = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , _lowerCAmelCase )
_lowercase : Union[str, Any] = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=_lowerCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 66 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1 ) -> Tuple:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> str:
_lowercase : Optional[Any] = []
for old_item in old_list:
_lowercase : List[Any] = old_item.replace('in_layers.0' , 'norm1' )
_lowercase : Optional[int] = new_item.replace('in_layers.2' , 'conv1' )
_lowercase : Dict = new_item.replace('out_layers.0' , 'norm2' )
_lowercase : Optional[Any] = new_item.replace('out_layers.3' , 'conv2' )
_lowercase : List[str] = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_lowercase : List[str] = new_item.replace('skip_connection' , 'conv_shortcut' )
_lowercase : List[Any] = shave_segments(SCREAMING_SNAKE_CASE , n_shave_prefix_segments=SCREAMING_SNAKE_CASE )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
_lowercase : str = []
for old_item in old_list:
_lowercase : List[str] = old_item
_lowercase : Union[str, Any] = new_item.replace('norm.weight' , 'group_norm.weight' )
_lowercase : List[str] = new_item.replace('norm.bias' , 'group_norm.bias' )
_lowercase : Union[str, Any] = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_lowercase : List[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
_lowercase : List[Any] = shave_segments(SCREAMING_SNAKE_CASE , n_shave_prefix_segments=SCREAMING_SNAKE_CASE )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowercase : List[str] = old_checkpoint[path]
_lowercase : Any = old_tensor.shape[0] // 3
_lowercase : Dict = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowercase : str = old_tensor.shape[0] // config['num_head_channels'] // 3
_lowercase : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowercase , _lowercase , _lowercase : List[Any] = old_tensor.split(channels // num_heads , dim=1 )
_lowercase : List[Any] = query.reshape(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = key.reshape(SCREAMING_SNAKE_CASE )
_lowercase : int = value.reshape(SCREAMING_SNAKE_CASE )
for path in paths:
_lowercase : Union[str, Any] = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowercase : List[str] = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_lowercase : List[str] = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_lowercase : Dict = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowercase : Optional[int] = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowercase : Optional[Any] = old_checkpoint[path['old']][:, :, 0]
else:
_lowercase : str = old_checkpoint[path['old']]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
_lowercase : str = {}
_lowercase : Tuple = checkpoint['time_embed.0.weight']
_lowercase : Optional[Any] = checkpoint['time_embed.0.bias']
_lowercase : List[Any] = checkpoint['time_embed.2.weight']
_lowercase : int = checkpoint['time_embed.2.bias']
_lowercase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_lowercase : Optional[int] = checkpoint['input_blocks.0.0.bias']
_lowercase : Dict = checkpoint['out.0.weight']
_lowercase : Dict = checkpoint['out.0.bias']
_lowercase : Dict = checkpoint['out.2.weight']
_lowercase : Dict = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_lowercase : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_lowercase : int = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the middle blocks only
_lowercase : List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_lowercase : Optional[int] = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the output blocks only
_lowercase : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_lowercase : Union[str, Any] = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE )
}
for i in range(1 , SCREAMING_SNAKE_CASE ):
_lowercase : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_lowercase : List[Any] = (i - 1) % (config['num_res_blocks'] + 1)
_lowercase : Any = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
_lowercase : Tuple = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
_lowercase : Optional[int] = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
_lowercase : Union[str, Any] = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
_lowercase : List[Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = {'old': F"""input_blocks.{i}.0""", 'new': F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_lowercase : int = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = renew_attention_paths(SCREAMING_SNAKE_CASE )
_lowercase : Dict = {
'old': F"""input_blocks.{i}.1""",
'new': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_lowercase : List[str] = {
F"""input_blocks.{i}.1.qkv.bias""": {
'key': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
'key': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE , )
_lowercase : Any = middle_blocks[0]
_lowercase : int = middle_blocks[1]
_lowercase : List[str] = middle_blocks[2]
_lowercase : List[Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE )
assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE )
assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = renew_attention_paths(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , attention_paths_to_split=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
_lowercase : List[Any] = i // (config['num_res_blocks'] + 1)
_lowercase : int = i % (config['num_res_blocks'] + 1)
_lowercase : List[str] = [shave_segments(SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]]
_lowercase : int = {}
for layer in output_block_layers:
_lowercase , _lowercase : str = layer.split('.' )[0], shave_segments(SCREAMING_SNAKE_CASE , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(SCREAMING_SNAKE_CASE )
else:
_lowercase : int = [layer_name]
if len(SCREAMING_SNAKE_CASE ) > 1:
_lowercase : Any = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
_lowercase : Any = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
_lowercase : Union[str, Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE )
_lowercase : Dict = renew_resnet_paths(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = {'old': F"""output_blocks.{i}.0""", 'new': F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowercase : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_lowercase : Tuple = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
_lowercase : Union[str, Any] = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(SCREAMING_SNAKE_CASE ) == 2:
_lowercase : Union[str, Any] = []
if len(SCREAMING_SNAKE_CASE ):
_lowercase : str = renew_attention_paths(SCREAMING_SNAKE_CASE )
_lowercase : int = {
'old': F"""output_blocks.{i}.1""",
'new': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_lowercase : Optional[int] = {
F"""output_blocks.{i}.1.qkv.bias""": {
'key': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
'key': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE , )
else:
_lowercase : Optional[int] = renew_resnet_paths(SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowercase : List[Any] = '.'.join(['output_blocks', str(SCREAMING_SNAKE_CASE ), path['old']] )
_lowercase : int = '.'.join(['up_blocks', str(SCREAMING_SNAKE_CASE ), 'resnets', str(SCREAMING_SNAKE_CASE ), path['new']] )
_lowercase : Optional[Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCamelCase = parser.parse_args()
UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCamelCase = json.loads(f.read())
UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    UpperCamelCase = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCamelCase = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 66 |
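For reference, a minimal de-obfuscated sketch of the key-renaming idea the converter above applies: substring replacement over state-dict keys. The `rename_state_dict_keys` helper and the toy checkpoint are illustrative, not part of the original script.

```python
import torch

def rename_state_dict_keys(state_dict: dict, mapping: dict) -> dict:
    """Return a new state dict with every old substring swapped for its new name."""
    renamed = {}
    for key, tensor in state_dict.items():
        new_key = key
        for old, new in mapping.items():
            new_key = new_key.replace(old, new)
        renamed[new_key] = tensor
    return renamed

# Toy checkpoint using two of the substrings the converter rewrites.
old_checkpoint = {
    "input_blocks.1.0.in_layers.0.weight": torch.zeros(4),
    "input_blocks.1.0.emb_layers.1.bias": torch.zeros(4),
}
mapping = {"in_layers.0": "norm1", "emb_layers.1": "time_emb_proj"}
print(sorted(rename_state_dict_keys(old_checkpoint, mapping)))
# ['input_blocks.1.0.norm1.weight', 'input_blocks.1.0.time_emb_proj.bias']
```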
from PIL import Image
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Image:
def brightness(SCREAMING_SNAKE_CASE ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
    bright_img.save("image_data/lena_brightness.png", format="png")
| 66 | 1 |
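A cleaned-up reading of the brightness snippet above, with names restored as I understand them; the range validation runs before `img.point`, so out-of-range levels raise before any pixel work.

```python
from PIL import Image

def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Shift every channel value by `level` via a point lookup."""
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level; the midpoint form
        # mirrors the contrast/sharpness helpers that sit alongside it upstream.
        return 128 + level + (c - 128)

    return img.point(brightness)
```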
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[int] = "trajectory_transformer"
_UpperCamelCase : Optional[int] = ["past_key_values"]
_UpperCamelCase : Optional[Any] = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _lowerCAmelCase=1_0_0 , _lowerCAmelCase=5 , _lowerCAmelCase=1 , _lowerCAmelCase=1 , _lowerCAmelCase=2_4_9 , _lowerCAmelCase=6 , _lowerCAmelCase=1_7 , _lowerCAmelCase=2_5 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase=1_2_8 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.00_06 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=1 , _lowerCAmelCase=True , _lowerCAmelCase=1 , _lowerCAmelCase=5_0_2_5_6 , _lowerCAmelCase=5_0_2_5_6 , **_lowerCAmelCase , ):
_lowercase : Optional[int] = vocab_size
_lowercase : int = action_weight
_lowercase : Optional[Any] = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : List[str] = action_dim
_lowercase : str = observation_dim
_lowercase : Any = transition_dim
_lowercase : Tuple = learning_rate
_lowercase : Tuple = n_layer
_lowercase : str = n_head
_lowercase : Optional[int] = n_embd
_lowercase : List[Any] = embd_pdrop
_lowercase : str = attn_pdrop
_lowercase : Optional[Any] = resid_pdrop
_lowercase : Tuple = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : str = kaiming_initializer_range
_lowercase : Any = use_cache
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
| 66 |
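The class above follows the standard `PretrainedConfig` pattern: constructor arguments stored as attributes, plus an `attribute_map` that aliases common names onto model-specific ones. A toy sketch (the `TinyConfig` class is made up for illustration):

```python
from transformers import PretrainedConfig

class TinyConfig(PretrainedConfig):
    model_type = "tiny"
    attribute_map = {"hidden_size": "n_embd"}  # common alias -> real attribute

    def __init__(self, n_embd=128, **kwargs):
        self.n_embd = n_embd
        super().__init__(**kwargs)

cfg = TinyConfig()
assert cfg.hidden_size == cfg.n_embd == 128  # the alias resolves through attribute_map
```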
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[Any] = torch.nn.Linear(1_0 , 1_0 )
_lowercase : Any = torch.optim.SGD(model.parameters() , 0.1 )
_lowercase : str = Accelerator()
_lowercase : Any = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 66 | 1 |
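The test above reduces to a pickle round-trip; the same check works on any wrapped object, e.g.:

```python
import pickle
import torch

# Dump and reload an optimizer the way the test exercises Accelerate's wrapper,
# then compare something observable.
opt = torch.optim.SGD(torch.nn.Linear(4, 4).parameters(), lr=0.1)
restored = pickle.loads(pickle.dumps(opt))
assert restored.defaults["lr"] == opt.defaults["lr"]
```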
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
if height >= 1:
move_tower(height - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
move_disk(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
move_tower(height - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
print('moving disk from' , SCREAMING_SNAKE_CASE , 'to' , SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> Optional[int]:
_lowercase : Tuple = int(input('Height of hanoi: ' ).strip() )
move_tower(SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
| 66 |
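De-obfuscated, the Hanoi snippet is the textbook recursion (2**n - 1 moves); the parameter names below are my reading of the obfuscated ones:

```python
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Move height-1 disks aside, move the base disk, then stack them back."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        print("moving disk from", from_pole, "to", to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)

move_tower(3, "A", "B", "C")  # prints the 2**3 - 1 = 7 moves
```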
import requests
from bs4 import BeautifulSoup
def __magic_name__ ( SCREAMING_SNAKE_CASE = "AAPL" ) -> str:
_lowercase : str = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
_lowercase : int = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE ).text , 'html.parser' )
_lowercase : List[str] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 66 | 1 |
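A readable version of the scraper above. Note the CSS class is whatever Yahoo's markup used when the snippet was written; it will silently break when the page changes, so treat this purely as a BeautifulSoup parsing sketch:

```python
import requests
from bs4 import BeautifulSoup

def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    # Fragile: the class string is tied to Yahoo's current stylesheet.
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    return div.find("span").text
```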
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ):
_lowercase : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowercase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowercase : Dict = np.concatenate(SCREAMING_SNAKE_CASE , axis=0 )
        _lowercase : Dict = np.array(SCREAMING_SNAKE_CASE ).astype(np.float32 ) / 255.0
_lowercase : Any = image.transpose(0 , 3 , 1 , 2 )
_lowercase : Optional[int] = 2.0 * image - 1.0
_lowercase : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE )
elif isinstance(image[0] , torch.Tensor ):
_lowercase : Optional[Any] = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
return image
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0.9995 ) -> str:
if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
_lowercase : Optional[Any] = True
_lowercase : Any = va.device
_lowercase : str = va.cpu().numpy()
_lowercase : str = va.cpu().numpy()
_lowercase : Tuple = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE ) * np.linalg.norm(SCREAMING_SNAKE_CASE )) )
if np.abs(SCREAMING_SNAKE_CASE ) > DOT_THRESHOLD:
_lowercase : Dict = (1 - t) * va + t * va
else:
_lowercase : Any = np.arccos(SCREAMING_SNAKE_CASE )
_lowercase : Dict = np.sin(SCREAMING_SNAKE_CASE )
_lowercase : Any = theta_a * t
_lowercase : Optional[Any] = np.sin(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = np.sin(theta_a - theta_t ) / sin_theta_a
_lowercase : List[str] = sin_theta_t / sin_theta_a
_lowercase : Optional[Any] = sa * va + sa * va
if inputs_are_torch:
_lowercase : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
return va
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Any = F.normalize(SCREAMING_SNAKE_CASE , dim=-1 )
_lowercase : Union[str, Any] = F.normalize(SCREAMING_SNAKE_CASE , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
for param in model.parameters():
_lowercase : List[str] = value
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , ):
super().__init__()
self.register_modules(
vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , clip_model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , coca_model=_lowerCAmelCase , coca_tokenizer=_lowerCAmelCase , coca_transform=_lowerCAmelCase , )
_lowercase : str = (
feature_extractor.size
if isinstance(feature_extractor.size , _lowerCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowercase : List[str] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _lowerCAmelCase )
set_requires_grad(self.clip_model , _lowerCAmelCase )
def __a ( self , _lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def __a ( self ):
self.enable_attention_slicing(_lowerCAmelCase )
def __a ( self ):
set_requires_grad(self.vae , _lowerCAmelCase )
def __a ( self ):
set_requires_grad(self.vae , _lowerCAmelCase )
def __a ( self ):
set_requires_grad(self.unet , _lowerCAmelCase )
def __a ( self ):
set_requires_grad(self.unet , _lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# get the original timestep using init_timestep
_lowercase : Union[str, Any] = min(int(num_inference_steps * strength ) , _lowerCAmelCase )
_lowercase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowercase : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
if not isinstance(_lowerCAmelCase , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(_lowerCAmelCase )}""" )
_lowercase : Tuple = image.to(device=_lowerCAmelCase , dtype=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCAmelCase )
]
_lowercase : int = torch.cat(_lowerCAmelCase , dim=0 )
else:
_lowercase : List[str] = self.vae.encode(_lowerCAmelCase ).latent_dist.sample(_lowerCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowercase : Optional[int] = 0.1_82_15 * init_latents
_lowercase : int = init_latents.repeat_interleave(_lowerCAmelCase , dim=0 )
_lowercase : Union[str, Any] = randn_tensor(init_latents.shape , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
# get latents
_lowercase : Dict = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = init_latents
return latents
def __a ( self , _lowerCAmelCase ):
_lowercase : Dict = self.coca_transform(_lowerCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowercase : List[str] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowercase : List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.feature_extractor.preprocess(_lowerCAmelCase )
_lowercase : Any = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowercase : int = self.clip_model.get_image_features(_lowerCAmelCase )
_lowercase : Any = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCAmelCase )
_lowercase : List[Any] = image_embeddings_clip.repeat_interleave(_lowerCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Optional[Any] = latents.detach().requires_grad_()
_lowercase : Any = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
_lowercase : Union[str, Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowercase : List[str] = self.scheduler.alphas_cumprod[timestep]
_lowercase : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : Dict = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowercase : Any = torch.sqrt(_lowerCAmelCase )
_lowercase : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.scheduler.sigmas[index]
_lowercase : Optional[int] = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowercase : Tuple = 1 / 0.1_82_15 * sample
_lowercase : Union[str, Any] = self.vae.decode(_lowerCAmelCase ).sample
_lowercase : Any = (image / 2 + 0.5).clamp(0 , 1 )
_lowercase : str = transforms.Resize(self.feature_extractor_size )(_lowerCAmelCase )
_lowercase : Union[str, Any] = self.normalize(_lowerCAmelCase ).to(latents.dtype )
_lowercase : Optional[Any] = self.clip_model.get_image_features(_lowerCAmelCase )
_lowercase : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCAmelCase )
_lowercase : Optional[Any] = spherical_dist_loss(_lowerCAmelCase , _lowerCAmelCase ).mean() * clip_guidance_scale
_lowercase : Tuple = -torch.autograd.grad(_lowerCAmelCase , _lowerCAmelCase )[0]
if isinstance(self.scheduler , _lowerCAmelCase ):
_lowercase : List[Any] = latents.detach() + grads * (sigma**2)
_lowercase : Union[str, Any] = noise_pred_original
else:
_lowercase : int = noise_pred_original - torch.sqrt(_lowerCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 0.6 , _lowerCAmelCase = 5_0 , _lowerCAmelCase = 7.5 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 1_0_0 , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , _lowerCAmelCase = 0.8 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_lowerCAmelCase , torch.Generator ) and batch_size > 1:
_lowercase : List[Any] = [generator] + [None] * (batch_size - 1)
_lowercase : List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowercase : Dict = [x[0] for x in coca_is_none if x[1]]
_lowercase : Any = ', '.join(_lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowercase : Tuple = self.get_image_description(_lowerCAmelCase )
if style_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowercase : List[str] = self.get_image_description(_lowerCAmelCase )
# get prompt text embeddings for content and style
_lowercase : int = self.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='pt' , )
_lowercase : List[str] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowercase : Tuple = self.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='pt' , )
_lowercase : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowercase : int = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowercase : Optional[int] = text_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# set timesteps
_lowercase : Tuple = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowercase : Optional[Any] = {}
if accepts_offset:
_lowercase : int = 1
self.scheduler.set_timesteps(_lowerCAmelCase , **_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowercase , _lowercase : List[str] = self.get_timesteps(_lowerCAmelCase , _lowerCAmelCase , self.device )
_lowercase : str = timesteps[:1].repeat(_lowerCAmelCase )
# Preprocess image
_lowercase : List[Any] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Tuple = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
_lowercase : List[str] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
_lowercase : int = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if clip_guidance_scale > 0:
_lowercase : int = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Tuple = slerp(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase : Optional[int] = content_text_input.input_ids.shape[-1]
_lowercase : Optional[int] = self.tokenizer([''] , padding='max_length' , max_length=_lowerCAmelCase , return_tensors='pt' )
_lowercase : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowercase : Tuple = uncond_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowercase : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowercase : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowercase : Optional[int] = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device='cpu' , dtype=_lowerCAmelCase ).to(
self.device )
else:
_lowercase : Tuple = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowercase : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowercase : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowercase : Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowercase : str = {}
if accepts_eta:
_lowercase : Union[str, Any] = eta
# check if the scheduler accepts generator
_lowercase : Tuple = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowercase : int = generator
with self.progress_bar(total=_lowerCAmelCase ):
for i, t in enumerate(_lowerCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowercase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowercase : Tuple = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
_lowercase : Optional[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowercase , _lowercase : Any = noise_pred.chunk(2 )
_lowercase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowercase : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowercase , _lowercase : Tuple = self.cond_fn(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowercase : Optional[Any] = 1 / 0.1_82_15 * latents
_lowercase : List[Any] = self.vae.decode(_lowerCAmelCase ).sample
_lowercase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowercase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowercase : Tuple = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase , nsfw_content_detected=_lowerCAmelCase )
| 66 |
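The obfuscation above collapsed both interpolation endpoints into the same name (`va`), which makes the `slerp` helper hard to follow. A numpy-only reading, with the torch round-tripping omitted:

```python
import numpy as np

def slerp(t: float, v0: np.ndarray, v1: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    """Spherical interpolation between two flat latents."""
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1              # nearly colinear: plain lerp
    theta_0 = np.arccos(dot)                      # angle between v0 and v1
    theta_t = theta_0 * t                         # angle at the interpolation point
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
print(slerp(0.5, a, b))  # ~[0.7071 0.7071], still unit length
```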
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["PoolFormerFeatureExtractor"]
UpperCamelCase = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 1 |
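The import-gating above feeds transformers' internal `_LazyModule`, which defers the heavy imports until an attribute is actually touched. The same effect in plain PEP 562, with a hypothetical package layout (`mypkg/submodule.py` defining `heavy_function`):

```python
# mypkg/__init__.py -- lazy-import sketch, not the actual _LazyModule code
import importlib

_import_structure = {"submodule": ["heavy_function"]}

def __getattr__(name):  # PEP 562: called only for missing module attributes
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```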
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = LayoutLMTokenizer
_UpperCamelCase : Union[str, Any] = LayoutLMTokenizerFast
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = True
def __a ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __a ( self , **_lowerCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : str = 'UNwant\u00E9d,running'
_lowercase : List[Any] = 'unwanted, running'
return input_text, output_text
def __a ( self ):
_lowercase : Dict = self.tokenizer_class(self.vocab_file )
_lowercase : Dict = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [7, 4, 5, 1_0, 8, 9] )
def __a ( self ):
pass
| 66 | 1 |
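The tokenization the test asserts is greedy longest-match-first WordPiece. A simplified sketch (HF's real tokenizer additionally lowercases, splits punctuation, and caps word length):

```python
def wordpiece(word: str, vocab: set) -> list:
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:          # take the longest matching piece
                tokens.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]            # no sub-piece matched at all
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))  # ['un', '##want', '##ed']
```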
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCamelCase = 4
UpperCamelCase = 3
class lowerCAmelCase_ ( __snake_case ):
pass
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for shard in shards:
for i in range(SCREAMING_SNAKE_CASE ):
yield {"i": i, "shard": shard}
def __magic_name__ ( ) -> List[Any]:
_lowercase : Optional[Any] = int(os.environ['RANK'] )
_lowercase : Optional[Any] = int(os.environ['WORLD_SIZE'] )
_lowercase : Dict = ArgumentParser()
parser.add_argument('--streaming' , type=SCREAMING_SNAKE_CASE )
parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE )
parser.add_argument('--num_workers' , type=SCREAMING_SNAKE_CASE , default=0 )
_lowercase : int = parser.parse_args()
_lowercase : List[Any] = args.streaming
_lowercase : Any = args.num_workers
_lowercase : List[str] = {'shards': [F"""shard_{shard_idx}""" for shard_idx in range(SCREAMING_SNAKE_CASE )]}
_lowercase : str = IterableDataset.from_generator(SCREAMING_SNAKE_CASE , gen_kwargs=SCREAMING_SNAKE_CASE )
if not streaming:
_lowercase : List[str] = Dataset.from_list(list(SCREAMING_SNAKE_CASE ) )
_lowercase : List[str] = split_dataset_by_node(SCREAMING_SNAKE_CASE , rank=SCREAMING_SNAKE_CASE , world_size=SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = torch.utils.data.DataLoader(SCREAMING_SNAKE_CASE , num_workers=SCREAMING_SNAKE_CASE )
_lowercase : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
_lowercase : Any = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
_lowercase : Union[str, Any] = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 66 |
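The expected per-rank size in the test above comes from a simple remainder rule: ranks below `full_size % world_size` receive one extra example.

```python
def expected_local_size(full_size: int, world_size: int, rank: int) -> int:
    return full_size // world_size + int(rank < full_size % world_size)

sizes = [expected_local_size(12, 5, r) for r in range(5)]
assert sizes == [3, 3, 2, 2, 2] and sum(sizes) == 12  # every example lands exactly once
```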
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : str = ShapEPipeline
_UpperCamelCase : Any = ["prompt"]
_UpperCamelCase : int = ["prompt"]
_UpperCamelCase : Union[str, Any] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : Optional[Any] = False
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return self.time_input_dim * 4
@property
def __a ( self ):
return 8
@property
def __a ( self ):
_lowercase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_lowerCAmelCase )
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowercase : Optional[Any] = PriorTransformer(**_lowerCAmelCase )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
_lowercase : List[Any] = ShapERenderer(**_lowerCAmelCase )
return model
def __a ( self ):
_lowercase : Optional[Any] = self.dummy_prior
_lowercase : Dict = self.dummy_text_encoder
_lowercase : List[str] = self.dummy_tokenizer
_lowercase : Union[str, Any] = self.dummy_renderer
_lowercase : List[str] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=_lowerCAmelCase , clip_sample=_lowerCAmelCase , clip_sample_range=1.0 , )
_lowercase : List[str] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : Optional[Any] = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : List[Any] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
return inputs
def __a ( self ):
_lowercase : Optional[int] = 'cpu'
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Tuple = self.pipeline_class(**_lowerCAmelCase )
_lowercase : Union[str, Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Union[str, Any] = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
_lowercase : str = output.images[0]
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
_lowercase : str = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self ):
_lowercase : List[Any] = torch_device == 'cpu'
_lowercase : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , )
def __a ( self ):
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : Optional[int] = self.pipeline_class(**_lowerCAmelCase )
_lowercase : Any = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : str = 1
_lowercase : Optional[int] = 2
_lowercase : List[str] = self.get_dummy_inputs(_lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
_lowercase : int = batch_size * [inputs[key]]
_lowercase : Optional[int] = pipe(**_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
_lowercase : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
_lowercase : List[str] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Tuple = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowercase : int = pipe(
'a shark' , generator=_lowerCAmelCase , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 66 | 1 |
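The dummy-input helper above branches on the device string because, on the torch versions these tests target, a `torch.Generator` cannot be pinned to `mps`, so the global RNG is seeded there instead:

```python
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # seeds and returns the default CPU generator
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu")
print(torch.randn(2, generator=gen))  # reproducible across runs with the same seed
```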
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = LayoutLMTokenizer
_UpperCamelCase : Union[str, Any] = LayoutLMTokenizerFast
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = True
def __a ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __a ( self , **_lowerCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : str = 'UNwant\u00E9d,running'
_lowercase : List[Any] = 'unwanted, running'
return input_text, output_text
def __a ( self ):
_lowercase : Dict = self.tokenizer_class(self.vocab_file )
_lowercase : Dict = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [7, 4, 5, 1_0, 8, 9] )
def __a ( self ):
pass
| 66 |
import sys
UpperCamelCase = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : List[Any] = 1
for digit in s:
product *= int(SCREAMING_SNAKE_CASE )
return product
def __magic_name__ ( SCREAMING_SNAKE_CASE = N ) -> int:
_lowercase : Dict = -sys.maxsize - 1
_lowercase : Tuple = n[:13]
_lowercase : List[Any] = 13
while cur_index < len(SCREAMING_SNAKE_CASE ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
_lowercase : List[str] = substr[1:] + n[cur_index]
cur_index += 1
else:
_lowercase : str = max(SCREAMING_SNAKE_CASE , str_eval(SCREAMING_SNAKE_CASE ) )
_lowercase : Dict = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 | 1 |
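A brute-force reference for the sliding-window solution above: take the maximum product over every 13-digit window (shown with width 4 on a short prefix):

```python
from functools import reduce

def largest_window_product(digits: str, width: int = 13) -> int:
    return max(
        reduce(lambda acc, d: acc * int(d), digits[i : i + width], 1)
        for i in range(len(digits) - width + 1)
    )

print(largest_window_product("7316717653133062491922511", 4))  # 630 = 7*6*5*3
```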
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Dict = ["torch", "scipy"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch', 'scipy'] )
| 66 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Union[str, Any] = "speech_to_text"
_UpperCamelCase : Optional[int] = ["past_key_values"]
_UpperCamelCase : List[Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _lowerCAmelCase=1_0_0_0_0 , _lowerCAmelCase=1_2 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=4 , _lowerCAmelCase=6 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=4 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=2 , _lowerCAmelCase=True , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=6_0_0_0 , _lowerCAmelCase=1_0_2_4 , _lowerCAmelCase=2 , _lowerCAmelCase=(5, 5) , _lowerCAmelCase=1_0_2_4 , _lowerCAmelCase=8_0 , _lowerCAmelCase=1 , **_lowerCAmelCase , ):
_lowercase : Tuple = vocab_size
_lowercase : int = d_model
_lowercase : Tuple = encoder_ffn_dim
_lowercase : Union[str, Any] = encoder_layers
_lowercase : Dict = encoder_attention_heads
_lowercase : int = decoder_ffn_dim
_lowercase : Any = decoder_layers
_lowercase : Optional[int] = decoder_attention_heads
_lowercase : Optional[int] = dropout
_lowercase : Optional[int] = attention_dropout
_lowercase : str = activation_dropout
_lowercase : int = activation_function
_lowercase : Dict = init_std
_lowercase : Any = encoder_layerdrop
_lowercase : Optional[int] = decoder_layerdrop
_lowercase : Tuple = use_cache
_lowercase : int = encoder_layers
_lowercase : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase : Any = max_source_positions
_lowercase : Any = max_target_positions
_lowercase : Any = num_conv_layers
_lowercase : Union[str, Any] = list(_lowerCAmelCase )
_lowercase : List[Any] = conv_channels
_lowercase : int = input_feat_per_channel
_lowercase : Union[str, Any] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
| 66 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : str = ["image_processor", "tokenizer"]
_UpperCamelCase : Union[str, Any] = "AutoImageProcessor"
_UpperCamelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Union[str, Any] = self.image_processor
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if images is not None:
_lowercase : Union[str, Any] = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and images is not None:
_lowercase : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
return ["input_ids", "attention_mask", "pixel_values"]
| 66 | 1 |
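Stripped of the mixin plumbing, the `__call__` above merges the tokenizer's encoding with the image processor's `pixel_values`. A sketch with hypothetical callables `tok` and `imgproc`:

```python
def run_processor(tok, imgproc, text=None, images=None, **kwargs):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = tok(text, **kwargs) if text is not None else None
    features = imgproc(images, **kwargs) if images is not None else None
    if encoding is not None and features is not None:
        encoding["pixel_values"] = features.pixel_values  # attach vision inputs
    return encoding if encoding is not None else features
```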
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
UpperCamelCase = datasets.utils.logging.get_logger(__name__)
UpperCamelCase = ["names", "prefix"]
UpperCamelCase = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
UpperCamelCase = ["encoding_errors", "on_bad_lines"]
UpperCamelCase = ["date_format"]
@dataclass
class lowerCAmelCase_ ( datasets.BuilderConfig ):
_UpperCamelCase : str = ","
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[Union[int, List[int], str]] = "infer"
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[Union[int, str, List[int], List[str]]] = None
_UpperCamelCase : Optional[Union[List[int], List[str]]] = None
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : bool = True
_UpperCamelCase : Optional[Literal["c", "python", "pyarrow"]] = None
_UpperCamelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
_UpperCamelCase : Optional[list] = None
_UpperCamelCase : Optional[list] = None
_UpperCamelCase : bool = False
_UpperCamelCase : Optional[Union[int, List[int]]] = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Optional[Union[str, List[str]]] = None
_UpperCamelCase : bool = True
_UpperCamelCase : bool = True
_UpperCamelCase : bool = False
_UpperCamelCase : bool = True
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : str = "."
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : str = '"'
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : bool = True
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : bool = True
_UpperCamelCase : bool = False
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : int = 10000
_UpperCamelCase : Optional[datasets.Features] = None
_UpperCamelCase : Optional[str] = "strict"
_UpperCamelCase : Literal["error", "warn", "skip"] = "error"
_UpperCamelCase : Optional[str] = None
def __a ( self ):
if self.delimiter is not None:
_lowercase : Optional[Any] = self.delimiter
if self.column_names is not None:
_lowercase : List[str] = self.column_names
@property
def __a ( self ):
_lowercase : int = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
        # Drop parameters that still hold the default CsvConfig value: pandas.read_csv
        # either has no default for them or has deprecated them.
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowerCAmelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : int = CsvConfig
def __a ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , _lowerCAmelCase ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowercase : List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase , (str, list, tuple) ):
_lowercase : List[str] = data_files
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = [files]
_lowercase : Union[str, Any] = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowercase : Any = []
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = [files]
_lowercase : Union[str, Any] = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={'files': files} ) )
return splits
def __a ( self , _lowerCAmelCase ):
if self.config.features is not None:
_lowercase : Tuple = self.config.features.arrow_schema
if all(not require_storage_cast(_lowerCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
_lowercase : Tuple = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowerCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_lowercase : str = table_cast(_lowerCAmelCase , _lowerCAmelCase )
return pa_table
def __a ( self , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_lowercase : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCAmelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
_lowercase : Dict = pd.read_csv(_lowerCAmelCase , iterator=_lowerCAmelCase , dtype=_lowerCAmelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_lowerCAmelCase ):
_lowercase : Tuple = pa.Table.from_pandas(_lowerCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCAmelCase )}: {e}""" )
raise
| 66 |
from __future__ import annotations
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]:
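    # Sieve of Eratosthenes: return every prime <= num, e.g. num=10 -> [2, 3, 5, 7].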
if num <= 0:
_lowercase : List[str] = F"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(msg )
_lowercase : Union[str, Any] = [True] * (num + 1)
_lowercase : Union[str, Any] = []
_lowercase : Dict = 2
_lowercase : Union[str, Any] = int(math.sqrt(SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
        if sieve[start]:
prime.append(SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE ):
            if sieve[i]:
_lowercase : str = False
start += 1
for j in range(end + 1 , num + 1 ):
        if sieve[j]:
prime.append(SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 66 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = "informer"
_UpperCamelCase : Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "student_t" , _lowerCAmelCase = "nll" , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = "mean" , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 6_4 , _lowerCAmelCase = 3_2 , _lowerCAmelCase = 3_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = True , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.05 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 1_0_0 , _lowerCAmelCase = 0.02 , _lowerCAmelCase=True , _lowerCAmelCase = "prob" , _lowerCAmelCase = 5 , _lowerCAmelCase = True , **_lowerCAmelCase , ):
# time series specific configuration
_lowercase : int = prediction_length
_lowercase : str = context_length or prediction_length
_lowercase : List[str] = distribution_output
_lowercase : List[Any] = loss
_lowercase : Optional[int] = input_size
_lowercase : int = num_time_features
_lowercase : int = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_lowercase : Tuple = scaling
_lowercase : Any = num_dynamic_real_features
_lowercase : Union[str, Any] = num_static_real_features
_lowercase : int = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
_lowercase : Tuple = cardinality
else:
_lowercase : Tuple = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
_lowercase : List[str] = embedding_dimension
else:
_lowercase : Tuple = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_lowercase : List[str] = num_parallel_samples
# Transformer architecture configuration
_lowercase : int = input_size * len(self.lags_sequence ) + self._number_of_features
_lowercase : List[str] = d_model
_lowercase : Optional[Any] = encoder_attention_heads
_lowercase : str = decoder_attention_heads
_lowercase : List[Any] = encoder_ffn_dim
_lowercase : Dict = decoder_ffn_dim
_lowercase : Any = encoder_layers
_lowercase : List[str] = decoder_layers
_lowercase : Tuple = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : Tuple = activation_dropout
_lowercase : List[str] = encoder_layerdrop
_lowercase : List[Any] = decoder_layerdrop
_lowercase : int = activation_function
_lowercase : Any = init_std
_lowercase : int = use_cache
# Informer
_lowercase : Optional[int] = attention_type
_lowercase : Union[str, Any] = sampling_factor
_lowercase : Optional[int] = distil
super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 66 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : int = 384
if "tiny" in model_name:
_lowercase : Tuple = [3, 3, 9, 3]
_lowercase : List[str] = [96, 192, 384, 768]
if "small" in model_name:
_lowercase : List[str] = [3, 3, 27, 3]
_lowercase : Union[str, Any] = [96, 192, 384, 768]
if "base" in model_name:
_lowercase : List[Any] = [3, 3, 27, 3]
_lowercase : Dict = [128, 256, 512, 1_024]
_lowercase : Optional[int] = 512
if "large" in model_name:
_lowercase : List[str] = [3, 3, 27, 3]
_lowercase : List[Any] = [192, 384, 768, 1_536]
_lowercase : Tuple = 768
if "xlarge" in model_name:
_lowercase : str = [3, 3, 27, 3]
_lowercase : List[str] = [256, 512, 1_024, 2_048]
_lowercase : Tuple = 1_024
# set label information
_lowercase : Dict = 150
_lowercase : Union[str, Any] = 'huggingface/label-files'
_lowercase : str = 'ade20k-id2label.json'
_lowercase : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
_lowercase : Dict = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_lowercase : Tuple = {v: k for k, v in idalabel.items()}
_lowercase : List[str] = ConvNextConfig(
depths=SCREAMING_SNAKE_CASE , hidden_sizes=SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
_lowercase : Union[str, Any] = UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE , auxiliary_in_channels=SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , )
return config
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Any = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
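    # Pop the value stored under the old key and re-insert it under the new one.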
_lowercase : Any = dct.pop(SCREAMING_SNAKE_CASE )
_lowercase : Any = val
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : List[Any] = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
_lowercase : Optional[int] = model_name_to_url[model_name]
_lowercase : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['state_dict']
_lowercase : Optional[int] = get_upernet_config(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowercase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE )
if "bn" in key:
_lowercase : Any = key.replace('bn' , 'batch_norm' )
_lowercase : Any = val
# rename keys
_lowercase : int = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify on image
_lowercase : Union[str, Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_lowercase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
_lowercase : Tuple = SegformerImageProcessor()
_lowercase : Tuple = processor(SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
_lowercase : Dict = model(SCREAMING_SNAKE_CASE )
if model_name == "upernet-convnext-tiny":
_lowercase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowercase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowercase : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowercase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __magic_name__ ( SCREAMING_SNAKE_CASE = 8 ) -> str:
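    # Draw the requested number of characters, cryptographically securely, from
    # letters, digits, and punctuation.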
_lowercase : Any = ascii_letters + digits + punctuation
return "".join(secrets.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
    # Alternative generator: guarantee every character in chars_incl appears,
    # then pad the password to length i with random letters, digits, and
    # punctuation drawn via the generalised `random` helper below.
    i -= len(chars_incl )
_lowercase : Any = i // 3
_lowercase : Union[str, Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowercase : Dict = (
chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
)
_lowercase : Any = list(SCREAMING_SNAKE_CASE )
shuffle(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
    pass  # Intentionally empty stub; the generalised `random` helper above covers it.
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
    pass  # Intentionally empty stub; the generalised `random` helper above covers it.
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
    pass  # Intentionally empty stub; the generalised `random` helper above covers it.
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 8 ) -> bool:
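    # A password counts as strong here if it has at least min_length characters
    # and mixes uppercase, lowercase, digits, and punctuation.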
if len(SCREAMING_SNAKE_CASE ) < min_length:
        # The password must be at least min_length characters long (default 8)
return False
_lowercase : List[str] = any(char in ascii_uppercase for char in password )
_lowercase : Union[str, Any] = any(char in ascii_lowercase for char in password )
_lowercase : Optional[int] = any(char in digits for char in password )
_lowercase : int = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain uppercase and lowercase letters,
# numbers, and special characters
def __magic_name__ ( ) -> int:
_lowercase : Tuple = int(input('Please indicate the max length of your password: ' ).strip() )
_lowercase : Optional[Any] = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(SCREAMING_SNAKE_CASE ) )
print(
'Alternative Password generated:' , alternative_password_generator(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
    print('[If you are thinking of using this password, you had better save it.]' )
if __name__ == "__main__":
main()
| 66 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = "upernet"
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=[1, 2, 3, 6] , _lowerCAmelCase=True , _lowerCAmelCase=0.4 , _lowerCAmelCase=3_8_4 , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=2_5_5 , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowercase : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[Any] = backbone_config.get('model_type' )
_lowercase : str = CONFIG_MAPPING[backbone_model_type]
_lowercase : Tuple = config_class.from_dict(_lowerCAmelCase )
_lowercase : Optional[Any] = backbone_config
_lowercase : Any = hidden_size
_lowercase : Any = initializer_range
_lowercase : Tuple = pool_scales
_lowercase : List[Any] = use_auxiliary_head
_lowercase : Optional[Any] = auxiliary_loss_weight
_lowercase : Any = auxiliary_in_channels
_lowercase : Any = auxiliary_channels
_lowercase : List[str] = auxiliary_num_convs
_lowercase : List[str] = auxiliary_concat_input
_lowercase : Tuple = loss_ignore_index
def __a ( self ):
_lowercase : str = copy.deepcopy(self.__dict__ )
_lowercase : Tuple = self.backbone_config.to_dict()
_lowercase : int = self.__class__.model_type
return output
| 66 | 1 |
import argparse
import os
import re
UpperCamelCase = "src/diffusers"
# Pattern that looks at the indentation in a line.
UpperCamelCase = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase = re.compile(r"\[([^\]]+)\]")
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : List[str] = _re_indent.search(SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Tuple:
_lowercase : List[Any] = 0
_lowercase : List[Any] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
_lowercase : List[str] = ['\n'.join(lines[:index] )]
else:
_lowercase : Union[str, Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowercase : List[str] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
_lowercase : List[str] = [lines[index + 1]]
index += 1
else:
_lowercase : Optional[Any] = []
else:
blocks.append('\n'.join(SCREAMING_SNAKE_CASE ) )
_lowercase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append('\n'.join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
def _inner(SCREAMING_SNAKE_CASE ):
return key(SCREAMING_SNAKE_CASE ).lower().replace('_' , '' )
return _inner
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
# If no key is provided, we use a noop.
def noop(SCREAMING_SNAKE_CASE ):
return x
if key is None:
_lowercase : Optional[int] = noop
# Constants are all uppercase, they go first.
_lowercase : Dict = [obj for obj in objects if key(SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowercase : List[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE )[0].isupper() and not key(SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
_lowercase : Optional[int] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE )[0].isupper()]
_lowercase : Optional[int] = ignore_underscore(SCREAMING_SNAKE_CASE )
return sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple:
    # This inner function sorts the imports between [ ].
def _replace(SCREAMING_SNAKE_CASE ):
_lowercase : List[Any] = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
_lowercase : Tuple = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowercase : List[str] = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
_lowercase : Dict = import_statement.split('\n' )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowercase : Any = 2 if lines[1].strip() == '[' else 1
_lowercase : Optional[int] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowercase : Optional[Any] = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
_lowercase : int = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowercase : Any = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowercase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowercase : Any = keys[:-1]
_lowercase : Optional[int] = get_indent(lines[1] ) + ', '.join([F"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
_lowercase : int = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
with open(SCREAMING_SNAKE_CASE , 'r' ) as f:
_lowercase : Optional[int] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowercase : Optional[int] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowercase : Dict = main_blocks[block_idx]
_lowercase : Dict = block.split('\n' )
# Get to the start of the imports.
_lowercase : Optional[Any] = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowercase : Optional[int] = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowercase : List[Any] = '\n'.join(block_lines[line_idx:-1] )
_lowercase : Any = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowercase : List[str] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowercase : List[str] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowercase : List[Any] = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowercase : List[str] = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
_lowercase : List[Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowercase : Dict = 0
_lowercase : Tuple = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowercase : List[str] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
_lowercase : str = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write('\n'.join(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
_lowercase : Union[str, Any] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
_lowercase : Optional[int] = sort_imports(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , check_only=SCREAMING_SNAKE_CASE )
if result:
_lowercase : Optional[int] = [os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"""Would overwrite {len(SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
UpperCamelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
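    # Bitwise AND over the binary-string representations of two non-negative
    # integers, e.g. (25, 32) -> '0b000000' and (37, 50) -> '0b100000'.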
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_lowercase : str = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_lowercase : int = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_lowercase : List[Any] = max(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE ) , b_binary.zfill(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCamelCase = ""
UpperCamelCase = ""
UpperCamelCase = ""
UpperCamelCase = 1 # (0 is vertical, 1 is horizontal)
def __magic_name__ ( ) -> None:
_lowercase , _lowercase : List[str] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print('Processing...' )
_lowercase , _lowercase , _lowercase : Optional[Any] = update_image_and_anno(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for index, image in enumerate(SCREAMING_SNAKE_CASE ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowercase : Dict = random_chars(32 )
_lowercase : int = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowercase : Union[str, Any] = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Success {index+1}/{len(SCREAMING_SNAKE_CASE )} with {file_name}""" )
_lowercase : Tuple = []
for anno in new_annos[index]:
_lowercase : Any = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(SCREAMING_SNAKE_CASE )
with open(F"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[list, list]:
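    # Pair each annotation file with its image: every line holds a class id and
    # four floats (bounding-box values); label files with no boxes are skipped.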
_lowercase : Tuple = []
_lowercase : Optional[int] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '*.txt' ) ):
_lowercase : Any = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(SCREAMING_SNAKE_CASE ) as in_file:
_lowercase : Optional[int] = in_file.readlines()
_lowercase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , F"""{label_name}.jpg""" )
_lowercase : List[Any] = []
for obj_list in obj_lists:
_lowercase : Optional[int] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
return img_paths, labels
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 ) -> tuple[list, list, list]:
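    # Flip every image and mirror its box coordinates: flip_type 1 (horizontal)
    # reflects the x-centre (1 - x), flip_type 0 (vertical) reflects the y-centre.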
_lowercase : Optional[int] = []
_lowercase : Tuple = []
_lowercase : str = []
for idx in range(len(SCREAMING_SNAKE_CASE ) ):
_lowercase : Dict = []
_lowercase : Tuple = img_list[idx]
path_list.append(SCREAMING_SNAKE_CASE )
_lowercase : int = anno_list[idx]
_lowercase : int = cva.imread(SCREAMING_SNAKE_CASE )
if flip_type == 1:
_lowercase : int = cva.flip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for bbox in img_annos:
_lowercase : Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowercase : str = cva.flip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for bbox in img_annos:
_lowercase : List[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(SCREAMING_SNAKE_CASE )
new_imgs_list.append(SCREAMING_SNAKE_CASE )
return new_imgs_list, new_annos_lists, path_list
def __magic_name__ ( SCREAMING_SNAKE_CASE = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_lowercase : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 66 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : int = IFInpaintingSuperResolutionPipeline
_UpperCamelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
_UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {"latents"}
def __a ( self ):
return self._get_superresolution_dummy_components()
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : int = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Union[str, Any] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
_lowercase : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __a ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __a ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __a ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ):
self._test_save_load_local()
def __a ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 66 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : str = tempfile.mkdtemp()
_lowercase : Tuple = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_lowercase : str = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self , **_lowerCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , **_lowerCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , **_lowerCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self ):
shutil.rmtree(self.tmpdirname )
def __a ( self ):
_lowercase : Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_lowercase : Optional[Any] = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self ):
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : Optional[Any] = self.get_rust_tokenizer()
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : Union[str, Any] = AlignProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCAmelCase )
_lowercase : Dict = AlignProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCAmelCase )
def __a ( self ):
_lowercase : int = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : List[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowercase : str = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
_lowercase : str = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Tuple = AlignProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : Optional[int] = self.prepare_image_inputs()
_lowercase : Union[str, Any] = image_processor(_lowerCAmelCase , return_tensors='np' )
_lowercase : Dict = processor(images=_lowerCAmelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __a ( self ):
_lowercase : Optional[int] = self.get_image_processor()
_lowercase : Dict = self.get_tokenizer()
_lowercase : str = AlignProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : Dict = 'lower newer'
_lowercase : str = processor(text=_lowerCAmelCase )
_lowercase : Dict = tokenizer(_lowerCAmelCase , padding='max_length' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __a ( self ):
_lowercase : Any = self.get_image_processor()
_lowercase : Any = self.get_tokenizer()
_lowercase : List[str] = AlignProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : Tuple = 'lower newer'
_lowercase : List[str] = self.prepare_image_inputs()
_lowercase : List[Any] = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def __a ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Any = self.get_tokenizer()
_lowercase : List[Any] = AlignProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : Optional[Any] = processor.batch_decode(_lowerCAmelCase )
_lowercase : Union[str, Any] = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.get_image_processor()
_lowercase : Dict = self.get_tokenizer()
_lowercase : str = AlignProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
_lowercase : int = 'lower newer'
_lowercase : int = self.prepare_image_inputs()
_lowercase : Optional[Any] = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[float, float]:
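    # Solve the 2x2 linear system a1*x + b1*y = c1, a2*x + b2*y = c2 with
    # Cramer's rule; each equation is given as the coefficient triple (a, b, c).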
# Check if the input is valid
if not len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_lowercase , _lowercase , _lowercase : Tuple = equationa
_lowercase , _lowercase , _lowercase : Dict = equationa
# Calculate the determinants of the matrices
_lowercase : str = aa * ba - aa * ba
_lowercase : Any = ca * ba - ca * ba
_lowercase : Optional[int] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution: x = y = 0 (the system is consistent)
return (0.0, 0.0)
else:
_lowercase : Union[str, Any] = determinant_x / determinant
_lowercase : Tuple = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
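# A minimal usage sketch (added; not in the original snippet). It calls the function
# by the placeholder name it is defined under and assumes each equation is the
# coefficient triple (a, b, c) of a*x + b*y = c, as the determinants above suggest.
if __name__ == "__main__":
    # x + 2y = 7 and 3x - y = 1 have the unique solution (9/7, 20/7).
    print(__magic_name__((1, 2, 7), (3, -1, 1)))  # ~ (1.2857, 2.8571)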
| 66 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : List[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
_lowercase : List[Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(_lowerCAmelCase )
from datasets import load_dataset
_lowercase : Union[str, Any] = load_dataset('nielsr/rvlcdip-demo' )
_lowercase : Any = dataset['train'][0]['image'].convert('RGB' )
_lowercase : List[str] = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : Dict = model(**_lowerCAmelCase )
_lowercase : Any = outputs.logits
_lowercase : str = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , _lowerCAmelCase )
_lowercase : Union[str, Any] = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=_lowerCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 66 |
def __magic_name__ ( SCREAMING_SNAKE_CASE = 50 ) -> int:
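    # Dynamic programming over row lengths: for each tile size t in {2, 3, 4},
    # count the ways to place at least one tile of that single size in a row of
    # the given length, then sum the three counts.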
_lowercase : Optional[int] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 | 1 |
import unittest
from knapsack import greedy_knapsack as kp
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : Tuple = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
_lowercase : Dict = [2, 4, 6, 8, 1_0, 1_2]
_lowercase : int = 1_0_0
self.assertEqual(kp.calc_profit(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , 2_1_0 )
def __a ( self ):
self.assertRaisesRegex(_lowerCAmelCase , 'max_weight must greater than zero.' )
def __a ( self ):
self.assertRaisesRegex(_lowerCAmelCase , 'Weight can not be negative.' )
def __a ( self ):
self.assertRaisesRegex(_lowerCAmelCase , 'Profit can not be negative.' )
def __a ( self ):
self.assertRaisesRegex(_lowerCAmelCase , 'max_weight must greater than zero.' )
def __a ( self ):
self.assertRaisesRegex(
_lowerCAmelCase , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 66 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConvNextFeatureExtractor"]
UpperCamelCase = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 1 |
import argparse
import copy


def generate_neighbours(path):
    """simple docstring"""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """simple docstring"""
    with open(path) as f:
        start_node = f.read(1)
        end_node = start_node

        first_solution = []
        visiting = start_node
        distance_of_first_solution = 0
        while visiting not in first_solution:
            minim = 10000
            for k in dict_of_neighbours[visiting]:
                if int(k[1]) < int(minim) and k[0] not in first_solution:
                    minim = k[1]
                    best_node = k[0]

            first_solution.append(visiting)
            distance_of_first_solution = distance_of_first_solution + int(minim)
            visiting = best_node

        first_solution.append(end_node)

        position = 0
        for k in dict_of_neighbours[first_solution[-2]]:
            if k[0] == start_node:
                break
            position += 1

        distance_of_first_solution = (
            distance_of_first_solution
            + int(dict_of_neighbours[first_solution[-2]][position][1])
            - 10000
        )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """simple docstring"""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """simple docstring"""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    """simple docstring"""
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours,
        args.Iterations, args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 0 |
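# A usage sketch for the tabu-search row above, with the identifiers as restored
# from its own call sites. The edge-list format (one "node node distance" triple
# per line) is inferred from generate_neighbours; the file name, the complete
# 4-node graph, and the iteration/tabu sizes are illustrative assumptions.
edges = "a b 20\na c 18\na d 22\nb c 10\nb d 11\nc d 23\n"
with open("tabu_sample.txt", "w") as f:
    f.write(edges)

neighbours = generate_neighbours("tabu_sample.txt")
first_solution, first_distance = generate_first_solution("tabu_sample.txt", neighbours)
best_sol, best_cost = tabu_search(first_solution, first_distance, neighbours, iters=6, size=2)
print(best_sol, best_cost)  # e.g. ['a', 'c', 'b', 'd', 'a'] with total distance 61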
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 66 | 0 |
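# Cross-check for lucas_lehmer_test above: 2**p - 1 is prime exactly when the
# test passes, so it must agree with brute-force primality for small odd exponents.
def _is_prime(n: int) -> bool:
    return n >= 2 and all(n % d for d in range(2, int(n**0.5) + 1))

for p in (3, 5, 7, 11, 13):
    assert lucas_lehmer_test(p) == _is_prime(2**p - 1)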
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """simple docstring"""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 |
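# Why partition() above collects *products*: unique factorization means a product
# of primes identifies the multiset of primes, so each product stands for one
# distinct way to write n as a sum of primes. For n = 10 there are exactly five:
# 7+3, 5+5, 5+3+2, 3+3+2+2, 2+2+2+2+2.
assert partition(10) == {7 * 3, 5 * 5, 5 * 3 * 2, 3 * 3 * 2 * 2, 2**5}
assert len(partition(10)) == 5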
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is less than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 0 |
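# Quick checks of the restored generators above: probability >= 1 must yield the
# complete graph, probability <= 0 an edgeless one, and every vertex gets a key.
assert random_graph(3, 1.0) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert all(not adj for adj in random_graph(3, 0.0).values())
assert set(random_graph(5, 0.5)) == {0, 1, 2, 3, 4}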
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCamelCase__ :
"""simple docstring"""
a__ : str = PegasusConfig
a__ : str = {}
a__ : int = "gelu"
def __init__( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict=13 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Any=False , __lowerCAmelCase : Union[str, Any]=99 , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Optional[int]=37 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : int=20 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=1 , __lowerCAmelCase : Dict=0 , ) -> List[str]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = eos_token_id
_A = pad_token_id
_A = bos_token_id
def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
_A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_A = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_A = np.concatenate([input_ids, eos_tensor] , axis=1 )
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A = prepare_pegasus_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def snake_case_ ( self : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ) -> List[Any]:
_A = 20
_A = model_class_name(__lowerCAmelCase )
_A = model.encode(inputs_dict['''input_ids'''] )
_A , _A = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_A = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
_A = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_A = model.decode(
decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_A = model.decode(
decoder_input_ids[:, -1:] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCAmelCase , )
_A = model.decode(__lowerCAmelCase , __lowerCAmelCase )
_A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def snake_case_ ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ) -> str:
_A = 20
_A = model_class_name(__lowerCAmelCase )
_A = model.encode(inputs_dict['''input_ids'''] )
_A , _A = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_A = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_A = model.init_cache(decoder_input_ids.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
_A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_A = model.decode(
decoder_input_ids[:, :-1] , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_A = model.decode(
decoder_input_ids[:, -1:] , __lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCAmelCase , decoder_position_ids=__lowerCAmelCase , )
_A = model.decode(__lowerCAmelCase , __lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase )
_A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[Any] , _snake_case :Union[str, Any] , _snake_case :Union[str, Any]=None , _snake_case :str=None , ) -> List[str]:
if attention_mask is None:
_A = np.not_equal(_snake_case , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_A = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowerCamelCase__ ( _A , unittest.TestCase):
"""simple docstring"""
a__ : List[Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a__ : Union[str, Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a__ : Any = True
a__ : Tuple = False
a__ : Optional[int] = False
a__ : Union[str, Any] = False
def snake_case_ ( self : Union[str, Any] ) -> int:
_A = FlaxPegasusModelTester(self )
_A = ConfigTester(self , config_class=__lowerCAmelCase )
def snake_case_ ( self : List[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : Tuple ) -> Union[str, Any]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : List[str] ) -> List[Any]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_A = model_class(__lowerCAmelCase )
@jax.jit
def encode_jitted(__lowerCAmelCase : str , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Any ):
return model.encode(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
_A = encode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A = encode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = model_class(__lowerCAmelCase )
_A = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_A = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
return model.decode(
decoder_input_ids=__lowerCAmelCase , decoder_attention_mask=__lowerCAmelCase , encoder_outputs=__lowerCAmelCase , )
with self.subTest('''JIT Enabled''' ):
_A = decode_jitted(**__lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A = decode_jitted(**__lowerCAmelCase ).to_tuple()
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
for jitted_output, output in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case_ ( self : Union[str, Any] ) -> Tuple:
for model_class_name in self.all_model_classes:
_A = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__lowerCAmelCase )
_A = np.ones((1, 1) )
_A = model(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@slow
def snake_case_ ( self : List[str] ) -> str:
_A = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
_A = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
_A = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_A = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_A = tokenizer(__lowerCAmelCase , return_tensors='''np''' , truncation=__lowerCAmelCase , max_length=5_12 , padding=__lowerCAmelCase )
_A = model.generate(**__lowerCAmelCase , num_beams=2 ).sequences
_A = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
assert tgt_text == decoded
| 2 |
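# The check_use_cache_forward tests above assert one invariant: decoding token by
# token while reusing past state must match decoding the whole sequence at once.
# A framework-free miniature of that property, with prefix sums standing in for
# causal attention:
import numpy as np

def full_decode(x):
    return np.cumsum(x, axis=-1)  # every position sees all earlier positions

def incremental_decode(x):
    cache, out = 0.0, []
    for t in range(x.shape[-1]):
        cache = cache + x[..., t]  # the past_key_values analogue: running state
        out.append(cache)
    return np.stack(out, axis=-1)

x = np.random.rand(2, 5)
diff = np.max(np.abs(full_decode(x) - incremental_decode(x)))
assert diff < 1e-3, f"Max diff is {diff}"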
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
UpperCamelCase = "RMDJXFUWGISLHVTCQNKYPBEZOA"
UpperCamelCase = "SGLCPQWZHKXAREONTFBVIYJUDM"
UpperCamelCase = "HVSICLTYKQUBXDWAJZOMFGPREN"
UpperCamelCase = "RZWQHFMVDBKICJLNTUXAGYPSOE"
UpperCamelCase = "LFKIJODBEGAMQPXVUHYSTCZRWN"
UpperCamelCase = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pbstring: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pbstring)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(' ', '')

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor3.index(symbol) - rotorpos3]
            symbol = abc[rotor2.index(symbol) - rotorpos2]
            symbol = abc[rotor1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
UpperCamelCase = "This is my Python script that emulates the Enigma machine from WWII."
UpperCamelCase = (1, 1, 1)
UpperCamelCase = "pictures"
UpperCamelCase = (rotora, rotora, rotora)
UpperCamelCase = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 66 | 0 |
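# With the identifiers above restored, the reflector comment's claim ("you don't
# need another machine to decipher") can be checked directly: per symbol the
# machine applies a conjugated involution, so running a message through twice
# with identical settings returns the original. Settings below are arbitrary.
sample = "RECIPROCAL CIPHER"
once = enigma(sample, (5, 17, 24), (rotor1, rotor5, rotor9), plugb="AQ")
assert enigma(once, (5, 17, 24), (rotor1, rotor5, rotor9), plugb="AQ") == sample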
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 3 |
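# The two loops in the row above implement sliding-window supervision: each input
# is `look_back` consecutive scaled values, each target the next `forward_days`.
# A toy run with made-up numbers pins down the resulting shapes:
import numpy as np

series = np.arange(10, dtype=float)
look_back, forward_days = 3, 2
x = [series[i : i + look_back] for i in range(len(series) - look_back - forward_days + 1)]
y = [series[i + look_back : i + look_back + forward_days] for i in range(len(series) - look_back - forward_days + 1)]
assert np.array(x).shape == (6, 3) and np.array(y).shape == (6, 2)
assert list(x[0]) == [0.0, 1.0, 2.0] and list(y[0]) == [3.0, 4.0]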
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["GLPNFeatureExtractor"]
UpperCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
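# The lazy __init__ rows in this section all defer heavy imports until an
# attribute is first touched. A minimal stand-in for what transformers' _LazyModule
# does (the real class lives in transformers.utils; this sketch only mirrors the
# attribute routing and is not its actual implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [attributes]} into {attribute: submodule}
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)  # the import cost is paid only on first access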
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class a ( a__ ):
snake_case__ = '''gpt_bigcode'''
snake_case__ = ['''past_key_values''']
snake_case__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _snake_case=5_02_57 , _snake_case=10_24 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=None , _snake_case="gelu_pytorch_tanh" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=1E-5 , _snake_case=0.02 , _snake_case=True , _snake_case=True , _snake_case=5_02_56 , _snake_case=5_02_56 , _snake_case=True , _snake_case=True , _snake_case=True , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = n_positions
lowerCAmelCase = n_embd
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = n_inner
lowerCAmelCase = activation_function
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = scale_attn_weights
lowerCAmelCase = use_cache
lowerCAmelCase = attention_softmax_in_fpaa
lowerCAmelCase = scale_attention_softmax_in_fpaa
lowerCAmelCase = multi_query
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
| 4 |
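# The attribute_map on the config above aliases canonical names (hidden_size,
# num_hidden_layers, ...) onto GPT-2 style fields (n_embd, n_layer, ...). A
# minimal, transformers-free sketch of that aliasing mechanism:
class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd: int = 768, n_layer: int = 12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # reached only when normal lookup fails, i.e. for the canonical aliases
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = AliasedConfig(n_embd=1024)
assert cfg.hidden_size == 1024 and cfg.num_hidden_layers == 12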
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : List[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
_lowercase : List[Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(_lowerCAmelCase )
from datasets import load_dataset
_lowercase : Union[str, Any] = load_dataset('nielsr/rvlcdip-demo' )
_lowercase : Any = dataset['train'][0]['image'].convert('RGB' )
_lowercase : List[str] = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : Dict = model(**_lowerCAmelCase )
_lowercase : Any = outputs.logits
_lowercase : str = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , _lowerCAmelCase )
_lowercase : Union[str, Any] = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=_lowerCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 66 | 0 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 |
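# The loop in solution() above is O(n); both sums also have closed forms
# (n(n+1)/2 and n(n+1)(2n+1)/6), so the same difference comes out in O(1):
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert solution_closed_form(10) == solution(10) == 2640
assert solution_closed_form(100) == 25164150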
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 66 | 0 |
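# A file-free check of change_brightness above (requires Pillow): 128 + level +
# (c - 128) reduces to c + level, so every channel of a flat grey image shifts
# by exactly `level`.
from PIL import Image

flat = Image.new("RGB", (2, 2), color=(100, 100, 100))
assert change_brightness(flat, 50).getpixel((0, 0)) == (150, 150, 150)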
def binary_or(a: int, b: int) -> str:  # function name assumed; the original identifier was obfuscated
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 6 |
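# One case worked through binary_or above: 25 is 11001 and 32 is 100000; zfill
# aligns them to 011001 / 100000, and the column-wise OR gives 111001 = 57.
assert binary_or(25, 32) == "0b111001"
assert int(binary_or(25, 32), 2) == (25 | 32) == 57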
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
_lowercase : List[Any] = torch.nn.Linear(1_0 , 1_0 )
_lowercase : Any = torch.optim.SGD(model.parameters() , 0.1 )
_lowercase : str = Accelerator()
_lowercase : Any = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 66 | 0 |
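# The accelerate test above protects a single property: objects returned by
# accelerator.prepare must survive pickling. The same round-trip guard written
# as a reusable helper (illustrative only, not part of accelerate's API):
import pickle


def assert_picklable(obj):
    clone = pickle.loads(pickle.dumps(obj))  # raises if serialization support broke
    assert type(clone) is type(obj)
    return clone


assert_picklable({"lr": 0.1, "momentum": 0.9})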
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ) -> str:
'''simple docstring'''
_A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
_A = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('RGB' )
_A = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
] )
_A = transform(_snake_case ).unsqueeze(0 ).to(_snake_case )
return image
def _snake_case ( _snake_case : List[str] ) -> Dict:
'''simple docstring'''
if "visual_encoder" in key:
_A = re.sub('visual_encoder*' , 'vision_model.encoder' , _snake_case )
if "blocks" in key:
_A = re.sub(R'blocks' , 'layers' , _snake_case )
if "attn" in key:
_A = re.sub(R'attn' , 'self_attn' , _snake_case )
if "norm1" in key:
_A = re.sub(R'norm1' , 'layer_norm1' , _snake_case )
if "norm2" in key:
_A = re.sub(R'norm2' , 'layer_norm2' , _snake_case )
if "encoder.norm" in key:
_A = re.sub(R'encoder.norm' , 'post_layernorm' , _snake_case )
if "encoder.patch_embed.proj" in key:
_A = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , _snake_case )
if "encoder.pos_embed" in key:
_A = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , _snake_case )
if "encoder.cls_token" in key:
_A = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , _snake_case )
if "self_attn" in key:
_A = re.sub(R'self_attn.proj' , 'self_attn.projection' , _snake_case )
return key
@torch.no_grad()
def _snake_case ( _snake_case : Optional[int] , _snake_case : List[str]=None ) -> Any:
'''simple docstring'''
if config_path is not None:
_A = BlipConfig.from_pretrained(_snake_case )
else:
_A = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
_A = BlipForConditionalGeneration(_snake_case ).eval()
_A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
_A = blip_decoder(pretrained=_snake_case , image_size=3_84 , vit='base' )
_A = pt_model.eval()
_A = pt_model.state_dict()
for key in modified_state_dict.copy():
_A = modified_state_dict.pop(_snake_case )
_A = rename_key(_snake_case )
_A = value
hf_model.load_state_dict(_snake_case )
_A = 3_84
_A = load_demo_image(image_size=_snake_case , device='cpu' )
_A = BertTokenizer.from_pretrained('bert-base-uncased' )
_A = tokenizer(['a picture of'] ).input_ids
_A = hf_model.generate(_snake_case , _snake_case )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
_A = hf_model.generate(_snake_case )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_snake_case )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_A = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
_A = blip_vqa(pretrained=_snake_case , image_size=_snake_case , vit='base' )
vqa_model.eval()
_A = vqa_model.state_dict()
for key in modified_state_dict.copy():
_A = modified_state_dict.pop(_snake_case )
_A = rename_key(_snake_case )
_A = value
_A = BlipForQuestionAnswering(_snake_case )
hf_vqa_model.load_state_dict(_snake_case )
_A = ['How many dogs are in this image?']
_A = tokenizer(_snake_case , return_tensors='pt' ).input_ids
_A = hf_vqa_model.generate(_snake_case , _snake_case )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
_A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
_A = blip_itm(pretrained=_snake_case , image_size=_snake_case , vit='base' )
itm_model.eval()
_A = itm_model.state_dict()
for key in modified_state_dict.copy():
_A = modified_state_dict.pop(_snake_case )
_A = rename_key(_snake_case )
_A = value
_A = BlipForImageTextRetrieval(_snake_case )
_A = ['A picture of a woman with a dog sitting in a beach']
_A = tokenizer(
_snake_case , return_tensors='pt' , padding='max_length' , truncation=_snake_case , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_snake_case )
hf_itm_model.eval()
_A = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case )
_A = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
a = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 7 |
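# rename_key in the row above is a chain of regex rewrites over BLIP checkpoint
# keys. A trimmed, self-contained excerpt of that chain, spot-checked on one key:
import re

def rename_key_sketch(key: str) -> str:
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    return key

assert rename_key_sketch("visual_encoder.blocks.0.attn.qkv.weight") == (
    "vision_model.encoder.layers.0.self_attn.qkv.weight"
)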
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 66 | 0 |
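# The scraper above keys on a Yahoo CSS class that changes often; the find()
# pattern itself is easier to see on a fixed snippet (illustrative markup):
from bs4 import BeautifulSoup

html = '<div class="My(6px) Pos(r) smartphone_Mt(6px)"><span>193.60</span></div>'
snippet = BeautifulSoup(html, 'html.parser')
assert snippet.find('div', class_='My(6px) Pos(r) smartphone_Mt(6px)').find('span').text == '193.60'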
'''simple docstring'''


class Graph:  # Public class to implement a graph; class and method names below were obfuscated, "Graph"/"count_islands" assumed
    def __init__(self, row: int, col: int, graph: list):
        '''simple docstring'''
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list) -> bool:
        '''simple docstring'''
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list):
        '''simple docstring'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        '''simple docstring'''
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count

| 8 |
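# A quick check of the DFS island counter above (class/method names as restored
# there): under 8-connectivity the diagonal cells below merge, so this grid
# holds 2 islands, not 3.
grid = [
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 1],
]
assert Graph(4, 4, grid).count_islands() == 2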
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["PoolFormerFeatureExtractor"]
UpperCamelCase = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = "vit_msn"
def __init__( self : Tuple , _snake_case : Any=7_68 , _snake_case : List[str]=12 , _snake_case : Dict=12 , _snake_case : str=30_72 , _snake_case : str="gelu" , _snake_case : Tuple=0.0 , _snake_case : Dict=0.0 , _snake_case : int=0.02 , _snake_case : Any=1E-06 , _snake_case : str=2_24 , _snake_case : List[str]=16 , _snake_case : Optional[int]=3 , _snake_case : Optional[Any]=True , **_snake_case : Dict , ):
"""simple docstring"""
super().__init__(**_snake_case )
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = layer_norm_eps
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = qkv_bias
| 9 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Dict = LayoutLMTokenizer
_UpperCamelCase : Union[str, Any] = LayoutLMTokenizerFast
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = True
def __a ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __a ( self , **_lowerCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : str = 'UNwant\u00E9d,running'
_lowercase : List[Any] = 'unwanted, running'
return input_text, output_text
def __a ( self ):
_lowercase : Dict = self.tokenizer_class(self.vocab_file )
_lowercase : Dict = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [7, 4, 5, 1_0, 8, 9] )
def __a ( self ):
pass
| 66 | 0 |
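# The vocab in the tokenizer test above is chosen so greedy longest-match-first
# WordPiece splits "unwanted" into un + ##want + ##ed. A dependency-free sketch
# of that matching rule (real BERT tokenization also handles punctuation and casing):
def wordpiece(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:  # take the longest piece the vocab knows
                pieces.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]  # no piece matched at this position
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "want", "wa"}
assert wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]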
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
| 10 |
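# Project Euler 47 context for the row above (names as restored there): the
# first pair of consecutive integers with two distinct prime factors each is
# 14, 15, and the first such triple starts at 644.
assert unique_prime_factors(14) == {2, 7}
assert upf_len(14) == upf_len(15) == 2
assert solution(2) == 14
assert solution(3) == 644  # 644 = 2*2*7*23, 645 = 3*5*43, 646 = 2*17*19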
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : str = ShapEPipeline
_UpperCamelCase : Any = ["prompt"]
_UpperCamelCase : int = ["prompt"]
_UpperCamelCase : Union[str, Any] = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : Optional[Any] = False
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return self.time_input_dim * 4
@property
def __a ( self ):
return 8
@property
def __a ( self ):
_lowercase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_lowerCAmelCase )
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowercase : Optional[Any] = PriorTransformer(**_lowerCAmelCase )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
_lowercase : List[Any] = ShapERenderer(**_lowerCAmelCase )
return model
def __a ( self ):
_lowercase : Optional[Any] = self.dummy_prior
_lowercase : Dict = self.dummy_text_encoder
_lowercase : List[str] = self.dummy_tokenizer
_lowercase : Union[str, Any] = self.dummy_renderer
_lowercase : List[str] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=_lowerCAmelCase , clip_sample=_lowerCAmelCase , clip_sample_range=1.0 , )
_lowercase : List[str] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : Optional[Any] = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : List[Any] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
return inputs
def __a ( self ):
_lowercase : Optional[int] = 'cpu'
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Tuple = self.pipeline_class(**_lowerCAmelCase )
_lowercase : Union[str, Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Union[str, Any] = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
_lowercase : str = output.images[0]
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
_lowercase : str = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self ):
_lowercase : List[Any] = torch_device == 'cpu'
_lowercase : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , )
def __a ( self ):
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : Optional[int] = self.pipeline_class(**_lowerCAmelCase )
_lowercase : Any = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : str = 1
_lowercase : Optional[int] = 2
_lowercase : List[str] = self.get_dummy_inputs(_lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
_lowercase : int = batch_size * [inputs[key]]
_lowercase : Optional[int] = pipe(**_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
_lowercase : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
_lowercase : List[str] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Tuple = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowercase : int = pipe(
'a shark' , generator=_lowerCAmelCase , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 66 | 0 |
'''simple docstring'''
def solution(n: int = 600_851_475_143) -> int:
    """simple docstring"""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 11 |
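# The worked example from the Project Euler 3 statement, run through the
# restored solution above: the prime factors of 13195 are 5, 7, 13 and 29.
assert solution(13195) == 29
assert solution(17) == 17  # a prime is its own largest prime factor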
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 | 0 |
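# Spot checks for the restored Project Euler 8 row: str_eval multiplies digits,
# and the upstream doctest gives 23514624000 as the largest 13-digit-window
# product in N (stated here as a reference value, not re-derived).
assert str_eval("9989") == 9 * 9 * 8 * 9
assert solution() == 23514624000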
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    '''simple docstring'''
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    '''simple docstring'''
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    '''simple docstring'''
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
| 12 |
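# Round trip through the restored Playfair functions above (the encode/decode
# names were obfuscated and are assumed). Decoding returns the prepared
# digraphs, doubled letters split by X and odd length padded, not the raw input:
assert prepare_input("Balloon") == "BALXLOXONX"
key = "playfair example"
assert decode(encode("balloon", key), key) == "BALXLOXONX"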
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
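# A minimal sketch of the lazy-import pattern used above (the names below are
# illustrative, not from the original file): replacing the module object in
# sys.modules with a _LazyModule defers the heavy imports until an attribute
# is first accessed.
#
#   import sys
#   from transformers.utils import _LazyModule
#   _import_structure = {"configuration_gpt_neo": ["GPTNeoConfig"]}
#   sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
#   # later: `from transformers.models.gpt_neo import GPTNeoConfig` triggers the real import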
| 66 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """Map every byte value to a printable unicode character, reversibly."""
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word: tuple) -> set:
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
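

# A quick illustration of the two helpers above (a sketch; the expected values
# follow from the GPT-2 byte-level BPE convention, where the space byte maps to 'Ġ'):
#
#   assert bytes_to_unicode()[ord(" ")] == "Ġ"
#   assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}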
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word

    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
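

# A minimal usage sketch (assumes the `transformers` package and network access;
# the checkpoint name comes from the pretrained map above):
#
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   ids = tokenizer("Hello world")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))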
| 13 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        return ["input_ids", "attention_mask", "pixel_values"]
| 66 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 14 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
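

# Small sanity check (a sketch): the primes up to 10 are exactly [2, 3, 5, 7].
#
#   assert prime_sieve(10) == [2, 3, 5, 7]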
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 66 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None) -> None:
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'''Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'''Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None) -> None:
    """Register an error to raise when an uninstalled backend's format type is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Get a formatter for a given format type and instantiate it with the format kwargs."""
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}\'''' )
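

# A quick illustration (a sketch, assuming numpy is installed so the "numpy"
# formatter was registered above):
#
#   fmt = get_formatter("np")      # alias resolves to "numpy"
#   assert isinstance(fmt, NumpyFormatter)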
| 15 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['state_dict']
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('bn' , 'batch_norm' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
_lowercase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowercase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowercase : Optional[int] = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowercase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(F"""openmmlab/{model_name}""" )
        processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
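# Example invocation (a sketch; the script filename and output path are
# illustrative):
#
#   python convert_upernet_checkpoint.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny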
| 66 | 0 |
def solution():
    # Project Euler 9: there is exactly one Pythagorean triplet (a, b, c)
    # with a + b + c = 1000; return the product a * b * c.
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
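

# The known triplet is (200, 375, 425), so the product should be 31875000
# (a quick check; it runs the full double loop, so it takes a moment):
#
#   assert solution() == 31875000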
if __name__ == "__main__":
print(f'{solution() = }') | 16 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCamelCase = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
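

# A minimal construction sketch (assumes `transformers` is installed; the
# ConvNeXt backbone here is an illustrative choice):
#
#   backbone = CONFIG_MAPPING["convnext"](out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = UperNetConfig(backbone_config=backbone, num_labels=150)
#   print(config.to_dict()["model_type"])  # "upernet"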
| 66 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17 |
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
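

# Examples (a quick sketch): 5 = 0b101 and 3 = 0b011, so the bitwise AND is 0b001;
# 25 and 32 share no bits, so the result is all zeros at the wider width.
#
#   assert binary_and(5, 3) == "0b001"
#   assert binary_and(25, 32) == "0b000000"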
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_SCREAMING_SNAKE_CASE = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__( self , vocab_size=30000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ) -> str:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
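

# Usage sketch (assumes `transformers` is installed; the task string is an
# illustrative choice): the ONNX config's dynamic axes depend on the task.
#
#   onnx_config = AlbertOnnxConfig(AlbertConfig(), task="sequence-classification")
#   print(onnx_config.inputs["input_ids"])  # {0: "batch", 1: "sequence"}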
| 18 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )

    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def test_save_load_local( self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 66 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet: UNet2DModel , scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 2000 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample , sigma_t).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator).prev_sample
            # prediction step
            model_output = model(sample , sigma_t).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator)
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1)
        sample = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
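

# Usage sketch (assumes `diffusers` is installed and the checkpoint is on the
# Hub; "google/ncsnpp-church-256" is an illustrative choice):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")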
| 19 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
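

# Worked example (a sketch): for x + 2y = 3 and 2x + y = 3 the determinant is
# 1*1 - 2*2 = -3, determinant_x = 3*1 - 3*2 = -3 and determinant_y = 1*3 - 2*3 = -3,
# so Cramer's rule gives x = y = 1:
#
#   assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)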
| 66 | 0 |
__author__ = 'Alexander Joslin'

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 |
def solution(length: int = 50) -> int:
    # Project Euler 116: count the ways to replace tiles in a row of `length`
    # with red (2), green (3) or blue (4) tiles, each colour counted separately.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
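

# Sanity check (a sketch): for a row of length 5 there are 7 red, 3 green and
# 2 blue tilings, 12 in total.
#
#   assert solution(5) == 12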
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    def __init__( self , scheduler , optimizers , step_with_optimizer: bool = True , split_batches: bool = False ):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step( self , *args , **kwargs ):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , "total_steps" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )

    def get_last_lr( self ):
        return self.scheduler.get_last_lr()

    def state_dict( self ):
        return self.scheduler.state_dict()

    def load_state_dict( self , state_dict ):
        self.scheduler.load_state_dict(state_dict )

    def get_lr( self ):
        return self.scheduler.get_lr()

    def print_lr( self , *args , **kwargs ):
        return self.scheduler.print_lr(*args , **kwargs )
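

# Usage sketch (assumes `accelerate` and `torch` are installed; in practice
# `Accelerator.prepare` builds this wrapper for you, so constructing it by
# hand is only for illustration, and `model` is assumed to exist):
#
#   from torch.optim import SGD
#   from torch.optim.lr_scheduler import StepLR
#   optimizer = SGD(model.parameters(), lr=0.1)
#   scheduler = AcceleratedScheduler(StepLR(optimizer, step_size=10), optimizer)
#   scheduler.step()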
| 21 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConvNextFeatureExtractor"]
UpperCamelCase = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 0 |
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily pick the largest denominations first to make up `value`."""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
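

# Example (a sketch): with denominations [1, 2, 5, 10], change for 18 is
# picked greedily as 10 + 5 + 2 + 1.
#
#   assert find_minimum_change([1, 2, 5, 10], "18") == [10, 5, 2, 1]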
# Driver Code
if __name__ == "__main__":
_snake_case : Tuple = []
_snake_case : Tuple = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
_snake_case : Union[str, Any] = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
_snake_case : Optional[int] = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
_snake_case : Optional[Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
_snake_case : List[str] = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F'''Following is minimal change for {value}: ''')
_snake_case : Tuple = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 22 |
def lucas_lehmer_test(p: int) -> bool:
    """Primality test for the Mersenne number 2**p - 1 (p must be at least 2)."""
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
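
# Checking the odd primes up to 31 recovers exactly the known Mersenne-prime
# exponents 3, 5, 7, 13, 17, 19 and 31 (a quick sketch):
#
#   print([p for p in [3, 5, 7, 11, 13, 17, 19, 23, 29, 31] if lucas_lehmer_test(p)])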
| 66 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / 'foo.lock'))
    lock2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filenames(tmpdir):
    filename = 'a' * 1000 + '.lock'
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith('.lock')
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
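

# Basic FileLock usage for reference (a sketch; the lock-file name is an
# illustrative choice):
#
#   lock = FileLock("my_resource.lock")
#   with lock.acquire(timeout=1):
#       ...  # exclusive access; a second holder blocks or raises Timeout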
| 23 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i )
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
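

# Deterministic example (a sketch): a complete graph on 3 vertices links every
# pair of distinct nodes.
#
#   assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}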
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 0 |
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 24 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCamelCase = "EGZWVONAHDCLFQMSIPJBYUKXTR"
UpperCamelCase = "FOBHMDKEXQNRAULPGSJVTYICZW"
UpperCamelCase = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
UpperCamelCase = "RMDJXFUWGISLHVTCQNKYPBEZOA"
UpperCamelCase = "SGLCPQWZHKXAREONTFBVIYJUDM"
UpperCamelCase = "HVSICLTYKQUBXDWAJZOMFGPREN"
UpperCamelCase = "RZWQHFMVDBKICJLNTUXAGYPSOE"
UpperCamelCase = "LFKIJODBEGAMQPXVUHYSTCZRWN"
UpperCamelCase = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        msg = F"""Please use 3 unique rotors (not {unique_rotsel})"""
        raise Exception(msg )
    # Checks if rotor positions are valid
    rotorpos1 , rotorpos2 , rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc ):
        msg = F"""First rotor position is not within range of 1..26 ({rotorpos1})"""
        raise ValueError(msg )
    if not 0 < rotorpos2 <= len(abc ):
        msg = F"""Second rotor position is not within range of 1..26 ({rotorpos2})"""
        raise ValueError(msg )
    if not 0 < rotorpos3 <= len(abc ):
        msg = F"""Third rotor position is not within range of 1..26 ({rotorpos3})"""
        raise ValueError(msg )
    # Validates string and returns dict
    pbdict = _plugboard(pb )
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring , str ):
        msg = F"""Plugboard setting isn't type string ({type(pbstring )})"""
        raise TypeError(msg )
    elif len(pbstring ) % 2 != 0:
        msg = F"""Odd number of symbols ({len(pbstring )})"""
        raise Exception(msg )
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(' ' , '' )
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = F"""'{i}' not in list of symbols"""
            raise Exception(msg )
        elif i in tmppbl:
            msg = F"""Duplicate symbol ({i})"""
            raise Exception(msg )
        else:
            tmppbl.add(i )
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(text: str, rotor_position: RotorPositionT, rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3) , plugb: str = "" , ) -> str:
    text = text.upper()
    rotor_position , rotor_selection , plugboard = _validator(
        rotor_position , rotor_selection , plugb.upper() )
    rotorpos1 , rotorpos2 , rotorpos3 = rotor_position
    rotor1 , rotor2 , rotor3 = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor 1 --------------------------
            index = abc.index(symbol ) + rotorpos1
            symbol = rotor1[index % len(abc )]
            # rotor 2 --------------------------
            index = abc.index(symbol ) + rotorpos2
            symbol = rotor2[index % len(abc )]
            # rotor 3 --------------------------
            index = abc.index(symbol ) + rotorpos3
            symbol = rotor3[index % len(abc )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor3.index(symbol ) - rotorpos3]
            symbol = abc[rotor2.index(symbol ) - rotorpos2]
            symbol = abc[rotor1.index(symbol ) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc ):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc ):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc ):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')
        result.append(symbol )
    return "".join(result )
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 66 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
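# Worked lookups against the two maps above (values taken directly from the
# tables): distilling a 12-layer teacher into a 3-layer student copies teacher
# layers 0, 6 and 11, and supervises the student with teacher layers 3, 7, 11.
assert LAYERS_TO_COPY[12][3] == [0, 6, 11]
assert LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11]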
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
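# Fallback behavior sketch for pick_layers_to_copy (signature as above:
# (n_student, n_teacher)): an 11-layer teacher has no hand-tuned mapping, so
# the first n_student layers are used and a warning is emitted.
# pick_layers_to_copy(2, 11)  # -> [0, 1], with a warning
# pick_layers_to_copy(3, 12)  # -> [0, 6, 11], from LAYERS_TO_COPY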
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def lowerCamelCase__ ( _a , _a = "student" , _a = None , _a = None , _a=False , _a=None , _a=None , **_a , ):
SCREAMING_SNAKE_CASE : Dict = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(_a , _a):
AutoTokenizer.from_pretrained(_a).save_pretrained(_a) # purely for convenience
SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM.from_pretrained(_a).eval()
else:
assert isinstance(_a , _a), f"teacher must be a model or string got type {type(_a)}"
SCREAMING_SNAKE_CASE : Optional[int] = teacher.config.to_diff_dict()
try:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
SCREAMING_SNAKE_CASE : List[str] = teacher_e
if d is None:
SCREAMING_SNAKE_CASE : Optional[Any] = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers"):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
SCREAMING_SNAKE_CASE : Optional[Any] = teacher_e
if d is None:
SCREAMING_SNAKE_CASE : List[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers"):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_a)
# Copy weights
SCREAMING_SNAKE_CASE : Tuple = teacher.config_class(**_a)
SCREAMING_SNAKE_CASE : Any = AutoModelForSeqaSeqLM.from_config(_a)
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
SCREAMING_SNAKE_CASE : List[str] = student.load_state_dict(teacher.state_dict() , strict=_a)
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = list(range(_a)), list(range(_a))
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
f" {save_path}")
student.save_pretrained(_a)
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
SCREAMING_SNAKE_CASE : List[int] = pick_layers_to_copy(_a , _a)
if d_layers_to_copy is None:
SCREAMING_SNAKE_CASE : List[int] = pick_layers_to_copy(_a , _a)
try:
if hasattr(
_a , "prophetnet"): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _a)
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _a)
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _a)
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _a)
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _a)
copy_layers(teacher.decoder.block , student.decoder.block , _a)
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}")
SCREAMING_SNAKE_CASE : Optional[int] = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(_a)
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
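# Illustrative invocation (checkpoint and path are examples only): keep all
# 12 encoder layers of a BART teacher and shrink the decoder to 3 layers.
# student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#     "facebook/bart-large-cnn", save_path="student-12-3", e=12, d=3
# )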
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 25 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["GLPNFeatureExtractor"]
UpperCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
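    # Effect of the _LazyModule swap above (sketch): importing the package is
    # now cheap, and heavy submodules load only on first attribute access --
    # e.g. `from transformers.models.glpn import GLPNConfig` triggers just the
    # configuration_glpn import, not the torch-backed modeling file.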
| 66 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
def __init__( self : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=16 , __magic_name__ : Optional[Any]=13 , __magic_name__ : List[str]=7 , __magic_name__ : Union[str, Any]=14 , __magic_name__ : Any=10 , __magic_name__ : Tuple=19 , __magic_name__ : int=5 , __magic_name__ : Any=4 , __magic_name__ : Tuple=True , __magic_name__ : int=16 , __magic_name__ : Any=2 , __magic_name__ : List[str]=4 , __magic_name__ : str=4 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[int]=[1, 2, 3, 4, 5] , __magic_name__ : Union[str, Any]=25 , __magic_name__ : Union[str, Any]=5 , ) -> List[Any]:
"""simple docstring"""
__snake_case : Dict = d_model
__snake_case : int = parent
__snake_case : str = batch_size
__snake_case : Optional[Any] = prediction_length
__snake_case : str = context_length
__snake_case : str = cardinality
__snake_case : List[Any] = num_time_features
__snake_case : Union[str, Any] = lags_sequence
__snake_case : str = embedding_dimension
__snake_case : Optional[int] = is_training
__snake_case : Tuple = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Any = hidden_act
__snake_case : str = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : str = context_length
__snake_case : int = prediction_length + label_length
__snake_case : Optional[Any] = label_length
__snake_case : List[str] = moving_average
__snake_case : str = autocorrelation_factor
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : List[Any] = config.context_length + max(config.lags_sequence )
__snake_case : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__snake_case : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__snake_case : List[str] = floats_tensor([self.batch_size, _past_length] )
__snake_case : List[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__snake_case : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__snake_case : Tuple = floats_tensor([self.batch_size, config.prediction_length] )
__snake_case : Union[str, Any] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
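    # Note on _past_length above (generic numbers, for illustration): with
    # context_length = 14 and lags_sequence = [1, 2, 3, 4, 5], the model needs
    # 14 + max(lags) = 19 past steps so that every lagged copy of the series
    # covers the full context window.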
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.get_config()
__snake_case : Any = self.prepare_autoformer_inputs_dict(__magic_name__ )
return config, inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case , __snake_case : str = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = AutoformerModel(config=__magic_name__ ).to(__magic_name__ ).eval()
__snake_case : Tuple = model(**__magic_name__ )
__snake_case : Optional[int] = outputs.encoder_last_hidden_state
__snake_case : Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[str] = model.get_encoder()
encoder.save_pretrained(__magic_name__ )
__snake_case : List[str] = AutoformerEncoder.from_pretrained(__magic_name__ ).to(__magic_name__ )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] = model.create_network_inputs(**__magic_name__ )
__snake_case , __snake_case : List[str] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__snake_case : Union[str, Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__snake_case : Optional[int] = encoder(inputs_embeds=__magic_name__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__snake_case : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__snake_case : Any = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__snake_case : Optional[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__snake_case : Optional[int] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[Any] = model.get_decoder()
decoder.save_pretrained(__magic_name__ )
__snake_case : Dict = AutoformerDecoder.from_pretrained(__magic_name__ ).to(__magic_name__ )
__snake_case : str = decoder(
trend=__magic_name__ , inputs_embeds=__magic_name__ , encoder_hidden_states=__magic_name__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__snake_case : int = model_class(__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
__snake_case , __snake_case : str = model_class.from_pretrained(__magic_name__ , output_loading_info=__magic_name__ )
self.assertEqual(info["""missing_keys"""] , [] )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__magic_name__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowercase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = inspect.signature(getattr(__magic_name__ , """forward""" ) )
# The main input is the name of the argument after `self`
__snake_case : List[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __magic_name__ )
def lowercase__ ( self : int ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : int = model_class(__magic_name__ )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Tuple = [*signature.parameters.keys()]
__snake_case : Any = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__magic_name__ )] , __magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Any = True
__snake_case : Union[str, Any] = getattr(self.model_tester , """seq_length""" , __magic_name__ )
__snake_case : str = getattr(self.model_tester , """decoder_seq_length""" , __magic_name__ )
__snake_case : Tuple = getattr(self.model_tester , """encoder_seq_length""" , __magic_name__ )
__snake_case : Tuple = getattr(self.model_tester , """d_model""" , __magic_name__ )
__snake_case : str = getattr(self.model_tester , """num_attention_heads""" , __magic_name__ )
__snake_case : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
__snake_case : Optional[int] = True
__snake_case : List[Any] = False
__snake_case : Optional[Any] = True
__snake_case : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : str = True
__snake_case : Optional[Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : List[Any] = outputs.encoder_attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__snake_case : Union[str, Any] = len(__magic_name__ )
__snake_case : Dict = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__magic_name__ , __magic_name__ )
# decoder attentions
__snake_case : Union[str, Any] = outputs.decoder_attentions
self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__snake_case : str = outputs.cross_attentions
self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__snake_case : str = True
__snake_case : int = True
__snake_case : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Optional[int] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 2 , len(__magic_name__ ) )
__snake_case : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a cached Autoformer test batch from the Hub."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 26 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)
        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
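        # The (1, 16) logits shape matches RVL-CDIP's 16 document classes; a
        # predicted label can be read off the config (illustrative):
        # predicted_class = logits.argmax(-1).item()
        # print(model.config.id2label[predicted_class])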
| 66 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
        print(F"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
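        # What replicate/shard do above (sketch): replicate() copies the params
        # to every device, while shard() reshapes a leading batch axis of size
        # num_devices * k into (num_devices, k, ...), one slice per device, so
        # the jit=True call can be pmapped across jax.device_count() devices.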
| 27 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        # shift every pixel value by `level`, keeping the mapping linear
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
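# Worked example of the point() mapping above: with level = 100, a pixel
# value of 50 becomes 128 + 100 + (50 - 128) = 150 -- every channel is
# shifted up by `level` while the mapping stays linear in the input.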
| 66 | 0 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return one longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
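# Usage sketch: the function returns one longest non-decreasing subsequence,
# which need not be unique -- for [1, 3, 2, 4] both [1, 3, 4] and [1, 2, 4]
# have the maximal length 3.
# longest_subsequence([1, 3, 2, 4])  # -> a length-3 subsequence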
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 66 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
lowerCamelCase_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowerCamelCase_ = ['''#version: 0.2''', '''l à</w>''']
lowerCamelCase_ = {'''unk_token''': '''<unk>'''}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n" )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase ) )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = '''Tôi là VinAI Research'''
lowerCamelCase_ = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = '''Tôi là VinAI Research'''
lowerCamelCase_ = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
print(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
| 29 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 66 | 0 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f'''\u001b[{color}m{content}\u001b[0m''', end)


def reset_cursor():
    forceWrite('''\r''')


def move_cursor(num_lines: int, direction: str):
    forceWrite(f'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''')


def clear_line():
    forceWrite(''' ''' * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH ) | 30 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["PoolFormerFeatureExtractor"]
UpperCamelCase = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = jnp.floataa
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = hidden_states.shape
SCREAMING_SNAKE_CASE_ = jax.image.resize(
_lowerCAmelCase , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
SCREAMING_SNAKE_CASE_ = self.conv(_lowerCAmelCase )
return hidden_states
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = jnp.floataa
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Tuple , _lowerCAmelCase : int ):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
SCREAMING_SNAKE_CASE_ = self.conv(_lowerCAmelCase )
return hidden_states
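# Shape sketch for the two blocks above (Flax convs are channels-last, NHWC;
# names and sizes are illustrative):
# upsample: (B, H, W, C) --nearest resize--> (B, 2H, 2W, C) --3x3 conv--> (B, 2H, 2W, out_channels)
# downsample: a stride-2 3x3 conv with (1, 1) padding maps (B, H, W, C) -> (B, H/2, W/2, out_channels)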
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = None
lowercase_ = 0.0
lowercase_ = None
lowercase_ = jnp.floataa
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.in_channels if self.out_channels is None else self.out_channels
SCREAMING_SNAKE_CASE_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = nn.Dense(_lowerCAmelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
SCREAMING_SNAKE_CASE_ = nn.Dropout(self.dropout_prob )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
SCREAMING_SNAKE_CASE_ = None
if use_nin_shortcut:
SCREAMING_SNAKE_CASE_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any]=True ):
SCREAMING_SNAKE_CASE_ = hidden_states
SCREAMING_SNAKE_CASE_ = self.norma(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = nn.swish(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.conva(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.time_emb_proj(nn.swish(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = jnp.expand_dims(jnp.expand_dims(_lowerCAmelCase , 1 ) , 1 )
SCREAMING_SNAKE_CASE_ = hidden_states + temb
SCREAMING_SNAKE_CASE_ = self.norma(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = nn.swish(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dropout(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.conva(_lowerCAmelCase )
if self.conv_shortcut is not None:
SCREAMING_SNAKE_CASE_ = self.conv_shortcut(_lowerCAmelCase )
return hidden_states + residual | 31 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
super().setUp()
_lowercase : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __a ( self , **_lowerCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase : str = 'UNwant\u00E9d,running'
_lowercase : List[Any] = 'unwanted, running'
return input_text, output_text
def __a ( self ):
_lowercase : Dict = self.tokenizer_class(self.vocab_file )
_lowercase : Dict = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [7, 4, 5, 1_0, 8, 9] )
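        # What this exercises: the basic tokenizer lowercases and strips the
        # accent ("UNwant\u00E9d" -> "unwanted"), then WordPiece greedily
        # matches the toy vocab from setUp, so
        # ['un', '##want', '##ed', ',', 'runn', '##ing'] -> ids [7, 4, 5, 10, 8, 9].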
def __a ( self ):
pass
| 66 | 0 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether n uses each of the digits 1-9 exactly once."""
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set('''123456789''')


def solution() -> int | None:
    """Return the largest 1-9 pandigital number formed as a concatenated product."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
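# Why the multipliers work (derivation): for a 4-digit base b (5000..9999),
# 2*b has 5 digits, so concat(b, 2*b) = b * 10**5 + 2*b = 100002 * b.
# For a 3-digit base b (100..333), 2*b and 3*b each have 3 digits, so
# concat(b, 2*b, 3*b) = b * 10**6 + 2*b * 10**3 + 3*b = 1002003 * b.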
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : Optional[Any] = False
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return 3_2
@property
def __a ( self ):
return self.time_input_dim * 4
@property
def __a ( self ):
return 8
@property
def __a ( self ):
_lowercase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_lowerCAmelCase )
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowercase : Optional[Any] = PriorTransformer(**_lowerCAmelCase )
return model
@property
def __a ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
_lowercase : List[Any] = ShapERenderer(**_lowerCAmelCase )
return model
def __a ( self ):
_lowercase : Optional[Any] = self.dummy_prior
_lowercase : Dict = self.dummy_text_encoder
_lowercase : List[str] = self.dummy_tokenizer
_lowercase : Union[str, Any] = self.dummy_renderer
_lowercase : List[str] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=_lowerCAmelCase , clip_sample=_lowerCAmelCase , clip_sample_range=1.0 , )
_lowercase : List[str] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
_lowercase : Optional[Any] = torch.manual_seed(_lowerCAmelCase )
else:
_lowercase : Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowercase : List[Any] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
return inputs
def __a ( self ):
_lowercase : Optional[int] = 'cpu'
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Tuple = self.pipeline_class(**_lowerCAmelCase )
_lowercase : Union[str, Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Union[str, Any] = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
_lowercase : str = output.images[0]
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
_lowercase : str = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self ):
_lowercase : List[Any] = torch_device == 'cpu'
_lowercase : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , )
def __a ( self ):
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : Optional[int] = self.pipeline_class(**_lowerCAmelCase )
_lowercase : Any = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : str = 1
_lowercase : Optional[int] = 2
_lowercase : List[str] = self.get_dummy_inputs(_lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
_lowercase : int = batch_size * [inputs[key]]
_lowercase : Optional[int] = pipe(**_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        pipe = ShapEPipeline.from_pretrained('openai/shap-e')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            'a shark',
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=6_4,
            frame_size=6_4,
            output_type='np',
        ).images[0]
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(images, expected_image)
| 66 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = inspect.getfile(accelerate.test_utils )
snake_case__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
snake_case__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
snake_case__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
snake_case__ = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
snake_case__ = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
snake_case__ = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
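    # pad_across_processes semantics exercised above: each rank holds a tensor
    # of length process_index + 2, so after padding every rank sees the global
    # max length (num_processes + 1), with zeros appended -- or prepended when
    # pad_first=True.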
| 33 |
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits of n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # sliding the window by one can only keep or raise the product
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            # evaluate the current window, then jump to a fresh one
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
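# A brute-force cross-check sketch (illustrative helper, not part of the
# original solution): simply take the max digit product over every window.
def brute_force(n: str = N, k: int = 13) -> int:
    return max(str_eval(n[i : i + k]) for i in range(len(n) - k + 1))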
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 | 0 |
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text into cipher values plus the matching random key."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plaintext from cipher values and their key."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k)) | 34 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
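# _LazyModule swaps itself into sys.modules so the torch/flax submodules listed above
# are only imported on first attribute access, keeping the top-level import cheap.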
| 66 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
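# prepare_config_and_inputs_for_common adapts the tester's tuple output into the
# (config, inputs_dict) pair that the shared FlaxModelTesterMixin tests expect.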
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
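        # The 3x3 slice assertion pins a few reference activations; atol=1e-4 tolerates
        # minor numerical drift across JAX backends while still catching regressions.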
| 35 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 66 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9,
        is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8,
        dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0,
        decoder_start_token_id=0, scope=None, decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return TaConfig.from_pretrained("""google/umt5-base""")
    def prepare_inputs_dict(
        self, config, input_ids, decoder_input_ids, attention_mask=None,
        decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
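    # Note: the defaults above form a no-masking baseline -- attend to every non-pad
    # token and keep every attention head (all-ones head masks).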
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return TaConfig(vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def get_config(self):
        return TaConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["""last_hidden_state"""]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["""last_hidden_state"""]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["""last_hidden_state"""]
        self.parent.assertFalse(torch.isnan(output).any().item())
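    # Note: create_and_check_decoder_model_past checks KV-cache correctness -- feeding the
    # full sequence at once and feeding one new token with past_key_values must produce
    # the same final-position hidden state (compared on a random feature slice).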
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': UMTaForConditionalGeneration,
            '''feature-extraction''': UMTaModel,
            '''summarization''': UMTaForConditionalGeneration,
            '''text2text-generation''': UMTaForConditionalGeneration,
            '''translation''': UMTaForConditionalGeneration,
            '''question-answering''': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
snake_case : List[Any] = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
SCREAMING_SNAKE_CASE_ ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=SCREAMING_SNAKE_CASE_ ,opset_version=9 ,input_names=["""input_ids""", """decoder_input_ids"""] ,)
@unittest.skipIf(torch_device == """cpu""" ,"""Cant do half precision""" )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
snake_case : Any = self.model_tester.prepare_config_and_inputs()
snake_case : Dict = config_and_inputs[0]
snake_case : str = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval()
model.to(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = {
"""head_mask""": torch.zeros(config.num_layers ,config.num_heads ,device=SCREAMING_SNAKE_CASE_ ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers ,config.num_heads ,device=SCREAMING_SNAKE_CASE_ ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers ,config.num_heads ,device=SCREAMING_SNAKE_CASE_ ),
}
for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE_ ,head_masking.items() ):
snake_case : Any = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
snake_case : Optional[int] = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model.generate(
config_and_inputs[1]["""input_ids"""] ,num_beams=1 ,max_length=3 ,output_attentions=SCREAMING_SNAKE_CASE_ ,return_dict_in_generate=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,)
# We check the state of decoder_attentions and cross_attentions just from the last step
snake_case : List[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""")
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("""google/umt5-small""", use_fast=False, legacy=False)
        input_text = [
            """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
            """No se como puedo <extra_id_0>.""",
            """This is the reason why we <extra_id_0> them.""",
            """The <extra_id_0> walks in <extra_id_1>, seats""",
            """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
        ]
        input_ids = tokenizer(input_text, return_tensors="""pt""", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ])
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
            """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
            """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 36 |
from __future__ import annotations
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]:
if num <= 0:
_lowercase : List[str] = F"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = [True] * (num + 1)
_lowercase : Union[str, Any] = []
_lowercase : Dict = 2
_lowercase : Union[str, Any] = int(math.sqrt(SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
_lowercase : str = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 66 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    """Applies Tesseract OCR on a document image and returns recognized words plus normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
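# LayoutLM-style models expect word boxes on a 0-1000 grid; normalize_box rescales the
# raw pixel coordinates reported by Tesseract into that range.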
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''')
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 37 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid, )
    return config
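# Illustrative check (assuming the fixes above): get_upernet_config("upernet-convnext-base")
# returns a config whose ConvNext backbone has depths [3, 3, 27, 3] and hidden sizes
# [128, 256, 512, 1024].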
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
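# The (old, new) pairs produced above are applied in bulk by rename_key below,
# translating mmsegmentation parameter names into the transformers UperNet layout.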
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(F"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(F"""openmmlab/{model_name}""")
        processor.push_to_hub(F"""openmmlab/{model_name}""")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66 | 0 |
'''simple docstring'''
from __future__ import annotations


def mean(nums: list) -> float:
    """
    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("""List is empty""")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
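    # Note: to_dict serializes the nested backbone config to a plain dict so the composite
    # config round-trips through JSON; from_dict rebuilds it via CONFIG_MAPPING above.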
| 66 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''')

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                ''' process.''')
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio) | 39 |
def binary_and(a: int, b: int) -> str:
    """
    >>> binary_and(25, 32)
    '0b000000'
    """
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 0 |